diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 8316d7dc16..bd5803a70f 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -4,7 +4,7 @@ on:
   push:
     branches:
       - master
-      - llvm18
+      - llvm19
 concurrency:
   # Cancels pending runs when a PR gets updated.
   group: ${{ github.head_ref || github.run_id }}-${{ github.actor }}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 781076ef61..ebdce53228 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -137,9 +137,9 @@ else()
   set(ZIG_SYSTEM_LIBCXX "stdc++" CACHE STRING "system libcxx name for build.zig")
 endif()
 
-find_package(llvm 18)
-find_package(clang 18)
-find_package(lld 18)
+find_package(llvm 19)
+find_package(clang 19)
+find_package(lld 19)
 
 if(ZIG_STATIC_ZLIB)
   if (MSVC)
@@ -834,6 +834,11 @@ else()
   endif()
 endif()
 
+option(ZIG2_NO_RTLIB "Build zig2 without linking to a compiler runtime library (for `zig cc` only)" OFF)
+if(ZIG2_NO_RTLIB)
+  set(ZIG2_LINK_FLAGS "${ZIG2_LINK_FLAGS} -rtlib=none")
+endif()
+
 set(ZIG1_WASM_MODULE "${PROJECT_SOURCE_DIR}/stage1/zig1.wasm")
 set(ZIG1_C_SOURCE "${PROJECT_BINARY_DIR}/zig1.c")
 set(ZIG2_C_SOURCE "${PROJECT_BINARY_DIR}/zig2.c")
@@ -889,9 +894,7 @@ set(BUILD_COMPILER_RT_ARGS
   --name compiler_rt
   -femit-bin="${ZIG_COMPILER_RT_C_SOURCE}"
   -target "${ZIG_HOST_TARGET_TRIPLE}"
-  --dep "build_options"
   "-Mroot=lib/compiler_rt.zig"
-  "-Mbuild_options=${ZIG_CONFIG_ZIG_OUT}"
 )
 
 add_custom_command(
diff --git a/bootstrap.c b/bootstrap.c
index fca6ce1081..e1684df19a 100644
--- a/bootstrap.c
+++ b/bootstrap.c
@@ -170,9 +170,7 @@ int main(int argc, char **argv) {
         "-ofmt=c", "-OReleaseSmall",
         "--name", "compiler_rt", "-femit-bin=compiler_rt.c",
         "-target", host_triple,
-        "--dep", "build_options",
         "-Mroot=lib/compiler_rt.zig",
-        "-Mbuild_options=config.zig",
         NULL,
     };
     print_and_run(child_argv);
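Both compiler_rt hunks above drop the `--dep build_options` / `-Mbuild_options` arguments: lib/compiler_rt.zig evidently no longer imports a build_options module, so a bare `-Mroot` is enough. The new ZIG2_NO_RTLIB option appears to exist for the case where zig2.c is itself compiled with `zig cc`: the zig2 binary already has Zig's own compiler_rt (the compiler_rt.c generated above) compiled in, and `-rtlib=none` keeps the driver from linking a second runtime copy on top of it, which would otherwise risk duplicate-symbol collisions. As a reminder of what such a runtime provides, a minimal plain-C sketch; `div128` is a made-up name for illustration:

    /* 128-bit division has no single instruction on x86_64 or aarch64; the
     * compiler lowers it to a call into the compiler runtime (__udivti3 in
     * compiler-rt / libgcc). Compile at -O0 and inspect the assembly to see it. */
    #include <stdio.h>

    static unsigned __int128 div128(unsigned __int128 a, unsigned __int128 b) {
        return a / b; /* typically lowered to a __udivti3 call */
    }

    int main(void) {
        unsigned long long lo = (unsigned long long)div128((unsigned __int128)1 << 100, 3);
        printf("%llu\n", lo); /* low 64 bits of the quotient */
        return 0;
    }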
diff --git a/build.zig b/build.zig
index 264cbe0405..731b825c07 100644
--- a/build.zig
+++ b/build.zig
@@ -1099,6 +1099,8 @@ const clang_libs = [_][]const u8{
     "clangToolingCore",
     "clangExtractAPI",
     "clangSupport",
+    "clangInstallAPI",
+    "clangAST",
 };
 const lld_libs = [_][]const u8{
     "lldMinGW",
@@ -1120,6 +1122,7 @@ const llvm_libs = [_][]const u8{
     "LLVMTextAPIBinaryReader",
     "LLVMCoverage",
     "LLVMLineEditor",
+    "LLVMSandboxIR",
     "LLVMXCoreDisassembler",
     "LLVMXCoreCodeGen",
     "LLVMXCoreDesc",
@@ -1255,6 +1258,7 @@ const llvm_libs = [_][]const u8{
     "LLVMDWARFLinkerParallel",
     "LLVMDWARFLinkerClassic",
     "LLVMDWARFLinker",
+    "LLVMCodeGenData",
     "LLVMGlobalISel",
     "LLVMMIRParser",
     "LLVMAsmPrinter",
diff --git a/ci/aarch64-linux-debug.sh b/ci/aarch64-linux-debug.sh
index 27effea36a..bb99a3044a 100644
--- a/ci/aarch64-linux-debug.sh
+++ b/ci/aarch64-linux-debug.sh
@@ -8,7 +8,7 @@ set -e
 ARCH="$(uname -m)"
 TARGET="$ARCH-linux-musl"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.418+ebd9efa85"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/deps/$CACHE_BASENAME"
 ZIG="$PREFIX/bin/zig"
 
diff --git a/ci/aarch64-linux-release.sh b/ci/aarch64-linux-release.sh
index 947694ce39..5f43bfcc5f 100644
--- a/ci/aarch64-linux-release.sh
+++ b/ci/aarch64-linux-release.sh
@@ -8,7 +8,7 @@ set -e
 ARCH="$(uname -m)"
 TARGET="$ARCH-linux-musl"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.418+ebd9efa85"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/deps/$CACHE_BASENAME"
 ZIG="$PREFIX/bin/zig"
 
diff --git a/ci/aarch64-macos-debug.sh b/ci/aarch64-macos-debug.sh
index 9283321c1f..4d44401a99 100755
--- a/ci/aarch64-macos-debug.sh
+++ b/ci/aarch64-macos-debug.sh
@@ -9,7 +9,7 @@ set -e
 ZIGDIR="$PWD"
 TARGET="$ARCH-macos-none"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.13.0-dev.130+98a30acad"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/$CACHE_BASENAME"
 ZIG="$PREFIX/bin/zig"
 
diff --git a/ci/aarch64-macos-release.sh b/ci/aarch64-macos-release.sh
index 396aa1d464..e8ce56eaea 100755
--- a/ci/aarch64-macos-release.sh
+++ b/ci/aarch64-macos-release.sh
@@ -9,7 +9,7 @@ set -e
 ZIGDIR="$PWD"
 TARGET="$ARCH-macos-none"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.13.0-dev.130+98a30acad"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/$CACHE_BASENAME"
 ZIG="$PREFIX/bin/zig"
 
diff --git a/ci/aarch64-windows.ps1 b/ci/aarch64-windows.ps1
index bf68a6443c..b0e7c474ca 100644
--- a/ci/aarch64-windows.ps1
+++ b/ci/aarch64-windows.ps1
@@ -1,5 +1,5 @@
 $TARGET = "$($Env:ARCH)-windows-gnu"
-$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.13.0-dev.130+98a30acad"
+$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 $MCPU = "baseline"
 $ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip"
 $PREFIX_PATH = "$(Get-Location)\..\$ZIG_LLVM_CLANG_LLD_NAME"
diff --git a/ci/x86_64-linux-debug.sh b/ci/x86_64-linux-debug.sh
index 370100e68e..4ad0fa7061 100755
--- a/ci/x86_64-linux-debug.sh
+++ b/ci/x86_64-linux-debug.sh
@@ -8,7 +8,7 @@ set -e
 ARCH="$(uname -m)"
 TARGET="$ARCH-linux-musl"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.418+ebd9efa85"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/deps/$CACHE_BASENAME"
 ZIG="$PREFIX/bin/zig"
 
diff --git a/ci/x86_64-linux-release.sh b/ci/x86_64-linux-release.sh
index 8ee7f79412..c4d9c9ddb7 100755
--- a/ci/x86_64-linux-release.sh
+++ b/ci/x86_64-linux-release.sh
@@ -8,7 +8,7 @@ set -e
 ARCH="$(uname -m)"
 TARGET="$ARCH-linux-musl"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.418+ebd9efa85"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/deps/$CACHE_BASENAME"
 ZIG="$PREFIX/bin/zig"
 
diff --git a/ci/x86_64-macos-release.sh b/ci/x86_64-macos-release.sh
index acf2b91d9a..838952b181 100755
--- a/ci/x86_64-macos-release.sh
+++ b/ci/x86_64-macos-release.sh
@@ -6,7 +6,7 @@ set -e
 ZIGDIR="$PWD"
 TARGET="$ARCH-macos-none"
 MCPU="baseline"
-CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.13.0-dev.130+98a30acad"
+CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 PREFIX="$HOME/$CACHE_BASENAME"
 JOBS="-j3"
 ZIG="$PREFIX/bin/zig"
diff --git a/ci/x86_64-windows-debug.ps1 b/ci/x86_64-windows-debug.ps1
index db8be82d89..8fd4db222c 100644
--- a/ci/x86_64-windows-debug.ps1
+++ b/ci/x86_64-windows-debug.ps1
@@ -1,5 +1,5 @@
 $TARGET = "$($Env:ARCH)-windows-gnu"
-$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.13.0-dev.130+98a30acad"
+$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 $MCPU = "baseline"
 $ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip"
 $PREFIX_PATH = "$($Env:USERPROFILE)\$ZIG_LLVM_CLANG_LLD_NAME"
diff --git a/ci/x86_64-windows-release.ps1 b/ci/x86_64-windows-release.ps1
index 3d8938ce19..432d0649be 100644
--- a/ci/x86_64-windows-release.ps1
+++ b/ci/x86_64-windows-release.ps1
@@ -1,5 +1,5 @@
 $TARGET = "$($Env:ARCH)-windows-gnu"
-$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.13.0-dev.130+98a30acad"
+$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.14.0-dev.1622+2ac543388"
 $MCPU = "baseline"
 $ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip"
 $PREFIX_PATH = "$($Env:USERPROFILE)\$ZIG_LLVM_CLANG_LLD_NAME"
diff --git a/cmake/Findclang.cmake b/cmake/Findclang.cmake
index c21fe18837..a95a8c903b 100644
--- a/cmake/Findclang.cmake
+++ b/cmake/Findclang.cmake
@@ -17,10 +17,10 @@ find_path(CLANG_INCLUDE_DIRS NAMES clang/Frontend/ASTUnit.h
 if(${LLVM_LINK_MODE} STREQUAL "shared")
   find_library(CLANG_LIBRARIES
     NAMES
-      libclang-cpp.so.18
-      libclang-cpp.so.18.1
-      clang-cpp-18.0
-      clang-cpp180
+      libclang-cpp.so.19
+      libclang-cpp.so.19.1
+      clang-cpp-19.0
+      clang-cpp190
       clang-cpp
     NAMES_PER_DIR
     HINTS "${LLVM_LIBDIRS}"
@@ -68,6 +68,8 @@ else()
   FIND_AND_ADD_CLANG_LIB(clangToolingCore)
   FIND_AND_ADD_CLANG_LIB(clangExtractAPI)
   FIND_AND_ADD_CLANG_LIB(clangSupport)
+  FIND_AND_ADD_CLANG_LIB(clangInstallAPI)
+  FIND_AND_ADD_CLANG_LIB(clangAST)
 endif()
 
 if (MSVC)
diff --git a/cmake/Findlld.cmake b/cmake/Findlld.cmake
index abe29b5fce..7c86aaed07 100644
--- a/cmake/Findlld.cmake
+++ b/cmake/Findlld.cmake
@@ -9,21 +9,21 @@ find_path(LLD_INCLUDE_DIRS NAMES lld/Common/Driver.h
   HINTS ${LLVM_INCLUDE_DIRS}
   PATHS
-    /usr/lib/llvm-18/include
-    /usr/local/llvm180/include
-    /usr/local/llvm18/include
-    /usr/local/opt/llvm@18/include
-    /opt/homebrew/opt/llvm@18/include
+    /usr/lib/llvm-19/include
+    /usr/local/llvm190/include
+    /usr/local/llvm19/include
+    /usr/local/opt/llvm@19/include
+    /opt/homebrew/opt/llvm@19/include
     /mingw64/include)
 
-find_library(LLD_LIBRARY NAMES lld-18.0 lld180 lld NAMES_PER_DIR
+find_library(LLD_LIBRARY NAMES lld-19.0 lld190 lld NAMES_PER_DIR
   HINTS ${LLVM_LIBDIRS}
   PATHS
-    /usr/lib/llvm-18/lib
-    /usr/local/llvm180/lib
-    /usr/local/llvm18/lib
-    /usr/local/opt/llvm@18/lib
-    /opt/homebrew/opt/llvm@18/lib
+    /usr/lib/llvm-19/lib
+    /usr/local/llvm190/lib
+    /usr/local/llvm19/lib
+    /usr/local/opt/llvm@19/lib
+    /opt/homebrew/opt/llvm@19/lib
 )
 
 if(EXISTS ${LLD_LIBRARY})
   set(LLD_LIBRARIES ${LLD_LIBRARY})
@@ -34,11 +34,11 @@ else()
   HINTS ${LLVM_LIBDIRS}
   PATHS
     ${LLD_LIBDIRS}
-    /usr/lib/llvm-18/lib
-    /usr/local/llvm180/lib
-    /usr/local/llvm18/lib
-    /usr/local/opt/llvm@18/lib
-    /opt/homebrew/opt/llvm@18/lib
+    /usr/lib/llvm-19/lib
+    /usr/local/llvm190/lib
+    /usr/local/llvm19/lib
+    /usr/local/opt/llvm@19/lib
+    /opt/homebrew/opt/llvm@19/lib
     /mingw64/lib
     /c/msys64/mingw64/lib
     c:/msys64/mingw64/lib)
diff --git a/cmake/Findllvm.cmake b/cmake/Findllvm.cmake
index c4eb49fe76..5bc874ec42 100644
--- a/cmake/Findllvm.cmake
+++ b/cmake/Findllvm.cmake
@@ -17,12 +17,12 @@ if(ZIG_USE_LLVM_CONFIG)
   # terminate when the right LLVM version is not found.
   unset(LLVM_CONFIG_EXE CACHE)
   find_program(LLVM_CONFIG_EXE
-    NAMES llvm-config-18 llvm-config-18.0 llvm-config180 llvm-config18 llvm-config NAMES_PER_DIR
+    NAMES llvm-config-19 llvm-config-19.0 llvm-config190 llvm-config19 llvm-config NAMES_PER_DIR
     PATHS
       "/mingw64/bin"
       "/c/msys64/mingw64/bin"
       "c:/msys64/mingw64/bin"
-      "C:/Libraries/llvm-18.0.0/bin")
+      "C:/Libraries/llvm-19.0.0/bin")
 
 if ("${LLVM_CONFIG_EXE}" STREQUAL "LLVM_CONFIG_EXE-NOTFOUND")
   if (NOT LLVM_CONFIG_ERROR_MESSAGES STREQUAL "")
@@ -40,9 +40,9 @@ if(ZIG_USE_LLVM_CONFIG)
     OUTPUT_STRIP_TRAILING_WHITESPACE)
   get_filename_component(LLVM_CONFIG_DIR "${LLVM_CONFIG_EXE}" DIRECTORY)
 
-  if("${LLVM_CONFIG_VERSION}" VERSION_LESS 18 OR "${LLVM_CONFIG_VERSION}" VERSION_EQUAL 19 OR "${LLVM_CONFIG_VERSION}" VERSION_GREATER 19)
+  if("${LLVM_CONFIG_VERSION}" VERSION_LESS 19 OR "${LLVM_CONFIG_VERSION}" VERSION_EQUAL 20 OR "${LLVM_CONFIG_VERSION}" VERSION_GREATER 20)
     # Save the error message, in case this is the last llvm-config we find
-    list(APPEND LLVM_CONFIG_ERROR_MESSAGES "expected LLVM 18.x but found ${LLVM_CONFIG_VERSION} using ${LLVM_CONFIG_EXE}")
+    list(APPEND LLVM_CONFIG_ERROR_MESSAGES "expected LLVM 19.x but found ${LLVM_CONFIG_VERSION} using ${LLVM_CONFIG_EXE}")
 
     # Ignore this directory and try the search again
     list(APPEND CMAKE_IGNORE_PATH "${LLVM_CONFIG_DIR}")
@@ -63,12 +63,12 @@ if(ZIG_USE_LLVM_CONFIG)
       ERROR_VARIABLE LLVM_CONFIG_ERROR
       ERROR_STRIP_TRAILING_WHITESPACE)
 
-    if (LLVM_CONFIG_ERROR)
+    if (LLVM_CONFIG_ERROR)
       # Save the error message, in case this is the last llvm-config we find
      if (ZIG_SHARED_LLVM)
-        list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 18.x found at ${LLVM_CONFIG_EXE} does not support linking as a shared library")
+        list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 19.x found at ${LLVM_CONFIG_EXE} does not support linking as a shared library")
      else()
-        list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 18.x found at ${LLVM_CONFIG_EXE} does not support linking as a static library")
+        list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 19.x found at ${LLVM_CONFIG_EXE} does not support linking as a static library")
      endif()
 
       # Ignore this directory and try the search again
@@ -200,6 +200,7 @@ else()
   FIND_AND_ADD_LLVM_LIB(LLVMTextAPIBinaryReader)
   FIND_AND_ADD_LLVM_LIB(LLVMCoverage)
   FIND_AND_ADD_LLVM_LIB(LLVMLineEditor)
+  FIND_AND_ADD_LLVM_LIB(LLVMSandboxIR)
   FIND_AND_ADD_LLVM_LIB(LLVMXCoreDisassembler)
   FIND_AND_ADD_LLVM_LIB(LLVMXCoreCodeGen)
   FIND_AND_ADD_LLVM_LIB(LLVMXCoreDesc)
@@ -335,6 +336,7 @@ else()
   FIND_AND_ADD_LLVM_LIB(LLVMDWARFLinkerParallel)
   FIND_AND_ADD_LLVM_LIB(LLVMDWARFLinkerClassic)
   FIND_AND_ADD_LLVM_LIB(LLVMDWARFLinker)
+  FIND_AND_ADD_LLVM_LIB(LLVMCodeGenData)
   FIND_AND_ADD_LLVM_LIB(LLVMGlobalISel)
   FIND_AND_ADD_LLVM_LIB(LLVMMIRParser)
   FIND_AND_ADD_LLVM_LIB(LLVMAsmPrinter)
diff --git a/lib/compiler/aro/aro/target.zig b/lib/compiler/aro/aro/target.zig
index 7b2e1576bf..b84788429a 100644
--- a/lib/compiler/aro/aro/target.zig
+++ b/lib/compiler/aro/aro/target.zig
@@ -658,6 +658,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .shadermodel => "shadermodel",
         .visionos => "xros",
         .serenity => "serenity",
+        .bridgeos => "bridgeos",
         .opencl,
         .opengl,
         .vulkan,
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
index 1dfd4be07f..63fc48cbed 100644
--- a/lib/compiler_rt/common.zig
+++ b/lib/compiler_rt/common.zig
@@ -1,8 +1,14 @@
 const std = @import("std");
 const builtin = @import("builtin");
 const native_endian = builtin.cpu.arch.endian();
+const ofmt_c = builtin.object_format == .c;
 
-pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .internal else .weak;
+pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test)
+    .internal
+else if (ofmt_c)
+    .strong
+else
+    .weak;
 /// Determines the symbol's visibility to other objects.
 /// For WebAssembly this allows the symbol to be resolved to other modules, but will not
 /// export it to the host runtime.
@@ -28,7 +34,7 @@ pub const want_float_exceptions = !builtin.cpu.arch.isWasm();
 
 // Libcalls that involve u128 on Windows x86-64 are expected by LLVM to use the
 // calling convention of @Vector(2, u64), rather than what's standard.
-pub const want_windows_v2u64_abi = builtin.os.tag == .windows and builtin.cpu.arch == .x86_64 and @import("builtin").object_format != .c;
+pub const want_windows_v2u64_abi = builtin.os.tag == .windows and builtin.cpu.arch == .x86_64 and !ofmt_c;
 
 /// This governs whether to use these symbol names for f16/f32 conversions
 /// rather than the standard names:
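A likely reading of the linkage change above: the C backend emits standard C, which has no portable notion of weak symbols, so when compiler_rt is emitted via `-ofmt=c` its symbols are exported with strong linkage instead of the usual weak linkage that lets a libc or user definition take precedence. A self-contained C sketch of the weak-vs-strong distinction itself, using GNU attributes; the symbol name is invented for illustration:

    #include <stdio.h>

    /* a weak definition: any strong definition of the same symbol elsewhere
     * in the link wins, without causing a duplicate-symbol error */
    __attribute__((weak)) int zig_runtime_stub(void) { return 1; }

    int main(void) {
        /* with no strong override present, the weak definition is used */
        printf("%d\n", zig_runtime_stub());
        return 0;
    }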
diff --git a/lib/include/__clang_cuda_intrinsics.h b/lib/include/__clang_cuda_intrinsics.h
index 3c3948863c..a04e8b6de4 100644
--- a/lib/include/__clang_cuda_intrinsics.h
+++ b/lib/include/__clang_cuda_intrinsics.h
@@ -215,9 +215,7 @@ inline __device__ unsigned int __activemask() {
 #if CUDA_VERSION < 9020
   return __nvvm_vote_ballot(1);
 #else
-  unsigned int mask;
-  asm volatile("activemask.b32 %0;" : "=r"(mask));
-  return mask;
+  return __nvvm_activemask();
 #endif
 }
 
diff --git a/lib/include/__stdarg_header_macro.h b/lib/include/__stdarg_header_macro.h
new file mode 100644
index 0000000000..beb92ee025
--- /dev/null
+++ b/lib/include/__stdarg_header_macro.h
@@ -0,0 +1,12 @@
+/*===---- __stdarg_header_macro.h ------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+#endif
diff --git a/lib/include/__stddef_header_macro.h b/lib/include/__stddef_header_macro.h
new file mode 100644
index 0000000000..db5fb3c0ab
--- /dev/null
+++ b/lib/include/__stddef_header_macro.h
@@ -0,0 +1,12 @@
+/*===---- __stddef_header_macro.h ------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDDEF_H
+#define __STDDEF_H
+#endif
diff --git a/lib/include/arm_acle.h b/lib/include/arm_acle.h
index 9cd34948e3..1518b0c4c8 100644
--- a/lib/include/arm_acle.h
+++ b/lib/include/arm_acle.h
@@ -75,6 +75,14 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v
 #define __dbg(t) __builtin_arm_dbg(t)
 #endif
 
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+#define _CHKFEAT_GCS 1
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__chkfeat(uint64_t __features) {
+  return __builtin_arm_chkfeat(__features) ^ __features;
+}
+#endif
+
 /* 7.5 Swap */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __swp(uint32_t __x, volatile uint32_t *__p) {
@@ -109,7 +117,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
 #endif
 
 /* 7.7 NOP */
-#if !defined(_MSC_VER) || !defined(__aarch64__)
+#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__))
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
   __builtin_arm_nop();
 }
@@ -313,7 +321,7 @@ __qdbl(int32_t __t) {
 }
 #endif
 
-/* 8.4.3 Accumultating multiplications */
+/* 8.4.3 Accumulating multiplications */
 #if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlabb(int32_t __a, int32_t __b, int32_t __c) {
@@ -545,7 +553,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) {
 }
 #endif
 
-/* 8.5.10 Parallel 16-bit multiplications */
+/* 8.5.10 Parallel 16-bit multiplication */
 #if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
@@ -748,7 +756,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
 #define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
 #define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
 
-/* 10.3 Memory Tagging Extensions (MTE) Intrinsics */
+/* 10.3 MTE intrinsics */
 #if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
 #define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
 #define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
@@ -757,7 +765,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
 #define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
 #define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
 
-/* 18 Memory Operations Intrinsics */
+/* 18 memcpy family of operations intrinsics - MOPS */
 #define __arm_mops_memset_tag(__tagged_address, __value, __size) \
   __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
 #endif
@@ -855,6 +863,24 @@ __rndrrs(uint64_t *__p) {
 }
 #endif
 
+/* 11.2 Guarded Control Stack intrinsics */
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+static __inline__ void * __attribute__((__always_inline__, __nodebug__))
+__gcspr() {
+  return (void *)__builtin_arm_rsr64("gcspr_el0");
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs")))
+__gcspopm() {
+  return __builtin_arm_gcspopm(0);
+}
+
+static __inline__ const void * __attribute__((__always_inline__, __nodebug__, target("gcs")))
+__gcsss(const void *__stack) {
+  return __builtin_arm_gcsss(__stack);
+}
+#endif
+
 #if defined(__cplusplus)
 }
 #endif
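Among the arm_acle.h changes above, the substantive additions are `__chkfeat` and the AArch64 Guarded Control Stack intrinsics (`__gcspr`, `__gcspopm`, `__gcsss`). Per the definition visible in the hunk, the CHKFEAT instruction leaves a bit set when a feature is not enabled, and the `__chkfeat` wrapper XORs that result with the request, so the return value is the subset of requested feature bits that are actually enabled. A usage sketch, assuming an AArch64 target and a clang new enough to define `_CHKFEAT_GCS`:

    #include <arm_acle.h>
    #include <stdio.h>

    int main(void) {
    #if defined(_CHKFEAT_GCS)
        /* nonzero means the guarded control stack is enabled for this thread */
        if (__chkfeat(_CHKFEAT_GCS))
            puts("GCS is enabled");
        else
            puts("GCS not enabled");
    #else
        puts("toolchain/target without CHKFEAT support");
    #endif
        return 0;
    }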
diff --git a/lib/include/arm_fp16.h b/lib/include/arm_fp16.h
index f114c6997b..2dd0653ab0 100644
--- a/lib/include/arm_fp16.h
+++ b/lib/include/arm_fp16.h
@@ -29,7 +29,7 @@ typedef __fp16 float16_t;
 
 #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
 
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(__arm64ec__)
 #define vabdh_f16(__p0, __p1) __extension__ ({ \
   float16_t __ret; \
   float16_t __s0 = __p0; \
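The arm_fp16.h hunk above, and the arm_neon.h diff below, widen each `defined(__aarch64__)` guard to `defined(__aarch64__) || defined(__arm64ec__)`, making the intrinsics available on Arm64EC (ARM64 Windows code that is ABI-compatible with x64). The roughly 3200 lines added to arm_neon.h below are generated bf16 intrinsics, each emitted in two variants: a little-endian form that calls the builtin directly, and a big-endian form that reverses vector lanes before and after the call, because the builtins assume little-endian lane numbering. That reversal is the plain `__builtin_shufflevector` idiom, sketched here standalone (the vector type and function names are illustrative only; clang or a recent gcc is assumed):

    #include <stdio.h>

    typedef short v4i16 __attribute__((vector_size(8)));

    /* the same idiom as the generated __rev0 temporaries: reverse lane order */
    static v4i16 reverse_lanes(v4i16 v) {
        return __builtin_shufflevector(v, v, 3, 2, 1, 0);
    }

    int main(void) {
        v4i16 v = {1, 2, 3, 4};
        v4i16 r = reverse_lanes(v);
        printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]); /* prints: 4 3 2 1 */
        return 0;
    }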
diff --git a/lib/include/arm_neon.h b/lib/include/arm_neon.h
index 97431fccab..b67616134b 100644
--- a/lib/include/arm_neon.h
+++ b/lib/include/arm_neon.h
@@ -28,15 +28,11 @@
 #error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
 #else
 
-#if !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#else
-
 #include <stdint.h>
 
 #include <arm_bf16.h>
 #include <arm_vector_types.h>
-#ifdef __aarch64__
+#if defined(__aarch64__) || defined(__arm64ec__)
 typedef uint8_t poly8_t;
 typedef uint16_t poly16_t;
 typedef uint64_t poly64_t;
@@ -127,6 +123,3193 @@ typedef struct poly64x2x4_t {
 
 #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
 
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x4_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  bfloat16x8_t __s0 = __p0; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai __attribute__((target("bf16,neon"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1,
(int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 
0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (bfloat16x4_t)(__promote); \ + __ret; \ +}) +__ai __attribute__((target("bf16,neon"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { + float32_t __ret; +bfloat16_t __reint = __p0; +int32_t __reint1 = (int32_t)(*(int16_t *) &__reint) << 16; + __ret = *(float32_t *) &__reint1; + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { + bfloat16_t __ret; + __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_bf16(__p0_0, __p1_0) __extension__ ({ \ + bfloat16x8_t __ret_0; \ + bfloat16x4_t __s0_0 = __p0_0; \ + __ret_0 = splatq_lane_bf16(__s0_0, __p1_0); \ + __ret_0; \ +}) +#else +#define vdupq_lane_bf16(__p0_1, __p1_1) __extension__ ({ \ + bfloat16x8_t __ret_1; \ + bfloat16x4_t __s0_1 = __p0_1; \ + bfloat16x4_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 3, 2, 1, 0); \ + __ret_1 = __noswap_splatq_lane_bf16(__rev0_1, __p1_1); \ + __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_1; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_bf16(__p0_2, __p1_2) __extension__ ({ \ + bfloat16x4_t __ret_2; \ + bfloat16x4_t __s0_2 = __p0_2; \ + __ret_2 = splat_lane_bf16(__s0_2, __p1_2); \ + __ret_2; \ +}) +#else +#define vdup_lane_bf16(__p0_3, __p1_3) __extension__ ({ \ + bfloat16x4_t __ret_3; \ + bfloat16x4_t __s0_3 = __p0_3; \ + bfloat16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \ + __ret_3 = __noswap_splat_lane_bf16(__rev0_3, __p1_3); \ + __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ + __ret_3; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_bf16(__p0_4, __p1_4) __extension__ ({ \ + bfloat16x8_t __ret_4; \ + bfloat16x8_t __s0_4 = __p0_4; \ + __ret_4 = splatq_laneq_bf16(__s0_4, __p1_4); \ + 
__ret_4; \ +}) +#else +#define vdupq_laneq_bf16(__p0_5, __p1_5) __extension__ ({ \ + bfloat16x8_t __ret_5; \ + bfloat16x8_t __s0_5 = __p0_5; \ + bfloat16x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5 = __noswap_splatq_laneq_bf16(__rev0_5, __p1_5); \ + __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_bf16(__p0_6, __p1_6) __extension__ ({ \ + bfloat16x4_t __ret_6; \ + bfloat16x8_t __s0_6 = __p0_6; \ + __ret_6 = splat_laneq_bf16(__s0_6, __p1_6); \ + __ret_6; \ +}) +#else +#define vdup_laneq_bf16(__p0_7, __p1_7) __extension__ ({ \ + bfloat16x4_t __ret_7; \ + bfloat16x8_t __s0_7 = __p0_7; \ + bfloat16x8_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_7 = __noswap_splat_laneq_bf16(__rev0_7, __p1_7); \ + __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 3, 2, 1, 0); \ + __ret_7; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ + __ret; \ +}) +#else +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = 
__p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ + __ret; \ +}) +#else +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ + __ret; \ +}) +#else +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ + __ret; \ +}) +#else +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ + __ret; \ +}) +#else +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ + __ret; \ +}) +#else +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ + __ret; \ +}) +#else +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ + __ret; \ +}) +#else +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \ +}) +#else +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \ +}) +#else +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ +}) +#else +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ +}) +#else +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], 
(int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ +}) +#else +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ +}) +#else +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ +}) +#else +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ +}) +#else +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ +}) +#else +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ +}) +#else +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + 
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + 
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) 
__builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 
3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t 
__p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17); + 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define 
vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) 
__builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t 
vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
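For orientation among the conversion intrinsics in this hunk: vcvt{a,m,n,p}_s16_f16 and their q/u16 variants differ only in the rounding mode applied to the float16-to-integer conversion, while the plain vcvt_s16_f16 rounds toward zero like a C cast. A minimal usage sketch, assuming a target built with the "fullfp16" feature (e.g. -march=armv8.2-a+fp16); the helper name demo_fp16_rounding is illustrative, not part of the header:

#include <arm_neon.h>

/* Illustrative only: converts the same float16x4_t vector with each
   rounding-mode variant defined above. */
void demo_fp16_rounding(float16x4_t v, int16x4_t out[4]) {
    out[0] = vcvta_s16_f16(v); /* round to nearest, ties away from zero */
    out[1] = vcvtm_s16_f16(v); /* round toward negative infinity */
    out[2] = vcvtn_s16_f16(v); /* round to nearest, ties to even */
    out[3] = vcvtp_s16_f16(v); /* round toward positive infinity */
}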
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
+  int16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
+  int16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
+  uint16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
+  uint16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
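The vcvt/vcvta/vcvtm/vcvtn/vcvtp families added above differ only in rounding mode: truncate toward zero, to nearest with ties away from zero, toward minus infinity, to nearest with ties to even, and toward plus infinity, respectively. A usage sketch (assumes a toolchain targeting hardware with the fullfp16 extension):

#include <arm_neon.h>

/* Sketch: the five float16 -> int16 conversion flavours side by side. */
void convert_all_roundings(float16x4_t v, int16x4_t out[5]) {
  out[0] = vcvt_s16_f16(v);  /* truncate toward zero */
  out[1] = vcvta_s16_f16(v); /* to nearest, ties away from zero */
  out[2] = vcvtm_s16_f16(v); /* toward minus infinity */
  out[3] = vcvtn_s16_f16(v); /* to nearest, ties to even */
  out[4] = vcvtp_s16_f16(v); /* toward plus infinity */
}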
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  __ret = vfmaq_f16(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  __ret = vfma_f16(__p0, -__p1, __p2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
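As the definitions above show, vfmsq_f16/vfms_f16 are expressed as vfmaq_f16/vfma_f16 with a negated multiplicand, so a fused multiply-subtract computes __p0 - __p1 * __p2 with a single rounding. A usage sketch:

#include <arm_neon.h>

/* Sketch: one fused multiply-add and its subtract counterpart,
   acc + x*y and acc - x*y, each rounded once. */
float16x8_t fma_pair(float16x8_t acc, float16x8_t x, float16x8_t y,
                     float16x8_t *diff) {
  *diff = vfmsq_f16(acc, x, y); /* acc - x*y, via vfmaq_f16(acc, -x, y) */
  return vfmaq_f16(acc, x, y);  /* acc + x*y */
}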
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 * __p1;
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 * __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
+  __ret; \
+})
+#else
+#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __ret; \
+  float16x8_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
+  __ret; \
+})
+#else
+#define vmul_n_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __ret; \
+  float16x4_t __s0 = __p0; \
+  float16_t __s1 = __p1; \
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
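vmulq_n_f16/vmul_n_f16 are macros rather than functions so the scalar operand can be splatted with a compound literal; the big-endian variants again only add the lane reversal. A usage sketch:

#include <arm_neon.h>

/* Sketch: scaling a vector by a scalar via the broadcast-multiply macro. */
float16x8_t scale_f16x8(float16x8_t v, float16_t s) {
  return vmulq_n_f16(v, s); /* expands to v * (float16x8_t){s,s,s,s,s,s,s,s} */
}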
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = -__p0;
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = -__rev0;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
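vpadd_f16 adds adjacent lane pairs across its two operands (result lanes are {p0[0]+p0[1], p0[2]+p0[3], p1[0]+p1[1], p1[2]+p1[3]}), so applying it twice reduces a float16x4_t to its horizontal sum. A sketch (hypothetical helper, not part of the header):

#include <arm_neon.h>

/* Sketch: horizontal sum of four half-precision lanes via two pairwise-add
   steps; the sum ends up replicated in every lane. */
static inline float16x4_t hsum_f16x4(float16x4_t v) {
  float16x4_t t = vpadd_f16(v, v); /* {v0+v1, v2+v3, v0+v1, v2+v3} */
  return vpadd_f16(t, t);          /* {sum, sum, sum, sum} */
}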
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
+  float16x8_t __ret;
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
+  float16x4_t __ret;
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
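vrsqrteq_f16 only produces a rough estimate; vrsqrtsq_f16 is its Newton-Raphson companion, computing (3 - a*b)/2 as a correction factor. A sketch of one refinement iteration, using only intrinsics defined above:

#include <arm_neon.h>

/* Sketch: reciprocal square root with one Newton-Raphson refinement step.
   vrsqrtsq_f16(a, b) returns (3 - a*b)/2, so with a = x*est and b = est the
   product est * (3 - x*est*est)/2 is the refined estimate of 1/sqrt(x). */
float16x8_t rsqrt_refined(float16x8_t x) {
  float16x8_t est = vrsqrteq_f16(x);                          /* rough guess */
  est = vmulq_f16(est, vrsqrtsq_f16(vmulq_f16(x, est), est)); /* one NR step */
  return est;
}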
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai __attribute__((target("i8mm,neon"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai __attribute__((target("i8mm,neon"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
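The i8mm intrinsics above provide mixed-sign int8 dot products and matrix multiply-accumulate: vusdotq_s32 accumulates, into each 32-bit lane, the dot product of four unsigned bytes with four signed bytes. A usage sketch (requires a target with the i8mm extension):

#include <arm_neon.h>

/* Sketch: int8 dot-product accumulation of the kind used in quantized
   matmul kernels. acc[i] += sum over j of a[4i+j] * b[4i+j], j = 0..3. */
int32x4_t accumulate_usdot(int32x4_t acc, uint8x16_t a, int8x16_t b) {
  return vusdotq_s32(acc, a, b);
}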
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
 #ifdef __LITTLE_ENDIAN__
 #define splat_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __ret; \
@@ -1392,13 +4575,13 @@ typedef struct poly64x2x4_t {
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
 }
 #else
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -1406,7 +4589,7 @@ __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
@@ -1414,13 +4597,13 @@ __ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
 }
 #else
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -1428,7 +4611,7 @@ __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
@@ -1436,13 +4619,13 @@ __ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
   return __ret;
 }
 #else
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint16x8_t __ret;
   uint16x8_t __rev0; __rev0 =
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1450,7 +4633,7 @@ __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; @@ -1458,13 +4641,13 @@ __ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1472,7 +4655,7 @@ __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; @@ -1480,13 +4663,13 @@ __ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1497,13 +4680,13 @@ __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1511,7 +4694,7 @@ __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t 
__noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; @@ -1519,13 +4702,13 @@ __ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1533,7 +4716,7 @@ __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; @@ -1541,13 +4724,13 @@ __ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1555,7 +4738,7 @@ __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; @@ -1563,13 +4746,13 @@ __ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -1577,7 +4760,7 @@ __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, 
uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; @@ -1585,13 +4768,13 @@ __ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1599,7 +4782,7 @@ __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; @@ -1607,13 +4790,13 @@ __ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1621,7 +4804,7 @@ __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; @@ -1629,13 +4812,13 @@ __ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -1646,13 +4829,13 @@ __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { 
int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -1660,7 +4843,7 @@ __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; @@ -1668,13 +4851,13 @@ __ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1682,7 +4865,7 @@ __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; @@ -1690,13 +4873,13 @@ __ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vabsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); @@ -1706,13 +4889,13 @@ __ai int8x16_t vabsq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vabsq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vabsq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); @@ -1722,13 +4905,13 @@ __ai float32x4_t vabsq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabsq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) 
__builtin_neon_vabsq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vabsq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); @@ -1738,13 +4921,13 @@ __ai int32x4_t vabsq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vabsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); @@ -1754,13 +4937,13 @@ __ai int16x8_t vabsq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vabs_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vabs_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); @@ -1770,13 +4953,13 @@ __ai int8x8_t vabs_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vabs_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vabs_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); @@ -1786,13 +4969,13 @@ __ai float32x2_t vabs_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vabs_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vabs_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); @@ -1802,13 +4985,13 @@ __ai int32x2_t vabs_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vabs_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vabs_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); @@ -1818,13 +5001,13 @@ __ai int16x4_t vabs_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t 
vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1835,13 +5018,13 @@ __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1852,13 +5035,13 @@ __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -1869,13 +5052,13 @@ __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1886,13 +5069,13 @@ __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1903,13 +5086,13 @@ __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t 
__ret; __ret = __p0 + __p1; return __ret; } #else -__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1920,13 +5103,13 @@ __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -1937,13 +5120,13 @@ __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -1954,13 +5137,13 @@ __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1971,13 +5154,13 @@ __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -1988,13 +5171,13 @@ __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2004,19 +5187,19 @@ __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 + __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2027,13 +5210,13 @@ __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2044,13 +5227,13 @@ __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2061,13 +5244,13 @@ __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2077,19 +5260,19 @@ __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 + __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai 
__attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2100,13 +5283,13 @@ __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2116,19 +5299,19 @@ __ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { } #endif -__ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { +__ai __attribute__((target("neon"))) poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { poly64x1_t __ret; __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else -__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2139,13 +5322,13 @@ __ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2156,13 +5339,13 @@ __ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38); return __ret; } #else -__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2173,13 +5356,13 @@ __ai poly64x2_t vaddq_p64(poly64x2_t __p0, 
poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else -__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2190,13 +5373,13 @@ __ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2204,7 +5387,7 @@ __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; @@ -2212,13 +5395,13 @@ __ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2226,7 +5409,7 @@ __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; @@ -2234,13 +5417,13 @@ __ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai 
__attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2248,7 +5431,7 @@ __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; @@ -2256,13 +5439,13 @@ __ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else -__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2270,7 +5453,7 @@ __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; @@ -2278,13 +5461,13 @@ __ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else -__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2292,7 +5475,7 @@ __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; @@ -2300,13 +5483,13 @@ __ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else -__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) 
int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2314,7 +5497,7 @@ __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; @@ -2322,13 +5505,13 @@ __ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2339,13 +5522,13 @@ __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2356,13 +5539,13 @@ __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2373,13 +5556,13 @@ __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2390,13 +5573,13 @@ __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif 
#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2407,13 +5590,13 @@ __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2424,13 +5607,13 @@ __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2441,13 +5624,13 @@ __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2458,13 +5641,13 @@ __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2475,13 +5658,13 @@ __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = 
__p0 & __p1; return __ret; } #else -__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2491,19 +5674,19 @@ __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 & __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2514,13 +5697,13 @@ __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2531,13 +5714,13 @@ __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2547,19 +5730,19 @@ __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 & __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 & __p1; return __ret; } #else -__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2570,13 +5753,13 @@ __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai 
__attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2587,13 +5770,13 @@ __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2604,13 +5787,13 @@ __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2621,13 +5804,13 @@ __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2638,13 +5821,13 @@ __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2655,13 +5838,13 @@ __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t 
__p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2672,13 +5855,13 @@ __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2689,13 +5872,13 @@ __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2706,13 +5889,13 @@ __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2723,13 +5906,13 @@ __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2739,19 +5922,19 @@ __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 & ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 & ~__p1; 
return __ret; } #else -__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2762,13 +5945,13 @@ __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2779,13 +5962,13 @@ __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2795,19 +5978,19 @@ __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 & ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 & ~__p1; return __ret; } #else -__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2818,13 +6001,13 @@ __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { poly8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2836,13 +6019,13 @@ __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { +__ai __attribute__((target("neon"))) poly16x4_t 
vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { poly16x4_t __ret; __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); return __ret; } #else -__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { +__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { poly16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2854,13 +6037,13 @@ __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); return __ret; } #else -__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { poly8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2872,13 +6055,13 @@ __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { +__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); return __ret; } #else -__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { +__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { poly16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2890,13 +6073,13 @@ __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2908,13 +6091,13 @@ __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t 
__p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2926,13 +6109,13 @@ __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -2944,13 +6127,13 @@ __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else -__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2962,13 +6145,13 @@ __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -2980,13 +6163,13 @@ __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else 
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -2998,13 +6181,13 @@ __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3016,13 +6199,13 @@ __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else -__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3034,13 +6217,13 @@ __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3052,13 +6235,13 @@ __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); 
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3070,13 +6253,13 @@ __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #else -__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3087,19 +6270,19 @@ __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { } #endif -__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { +__ai __attribute__((target("neon"))) uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); return __ret; } #else -__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3111,13 +6294,13 @@ __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3129,13 +6312,13 @@ __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3147,13 +6330,13 @@ __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3164,19 +6347,19 @@ __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { } #endif -__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { +__ai __attribute__((target("neon"))) int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else -__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3188,13 +6371,49 @@ __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + uint16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3205,13 +6424,13 @@ __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3222,13 +6441,13 @@ __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3239,13 +6458,13 @@ __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3256,13 +6475,13 @@ __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) 
__builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3273,13 +6492,13 @@ __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3290,13 +6509,13 @@ __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3307,13 +6526,13 @@ __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3324,13 +6543,13 @@ __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else -__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3341,13 +6560,13 @@ __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t 
__p0, poly8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 == __p1); return __ret; } #else -__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3358,13 +6577,13 @@ __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 == __p1); return __ret; } #else -__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3375,13 +6594,13 @@ __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 == __p1); return __ret; } #else -__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3392,13 +6611,13 @@ __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 == __p1); return __ret; } #else -__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3409,13 +6628,13 @@ __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 == __p1); return __ret; } #else -__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3426,13 +6645,13 @@ __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t 
vceqq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 == __p1); return __ret; } #else -__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3443,13 +6662,13 @@ __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 == __p1); return __ret; } #else -__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3460,13 +6679,13 @@ __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 == __p1); return __ret; } #else -__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3477,13 +6696,13 @@ __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else -__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3494,13 +6713,13 @@ __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else -__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3511,13 +6730,13 @@ __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t 
__ret; __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else -__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3528,13 +6747,13 @@ __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else -__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3545,13 +6764,13 @@ __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else -__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3562,13 +6781,13 @@ __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else -__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3579,13 +6798,13 @@ __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else -__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3596,13 +6815,13 @@ __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 >= __p1); return __ret; } #else -__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) 
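As the (uint8x16_t)(__p0 == __p1) bodies show, the whole vceq family is implemented with clang's native vector comparison, which already yields the NEON mask convention: each lane of the result is all-ones where the operands compare equal and all-zeros otherwise. That mask is what makes branchless lane selection work. A small usage sketch, assuming vbsl_u32, the standard NEON bit-select intrinsic (not part of these hunks):

#include <arm_neon.h>

/* Lane-wise "a == b ? a : fallback" with no branches. */
uint32x2_t pick_equal(uint32x2_t a, uint32x2_t b, uint32x2_t fallback) {
  uint32x2_t eq = vceq_u32(a, b);    /* 0xFFFFFFFF or 0 per lane */
  return vbsl_u32(eq, a, fallback);  /* set mask bits select from a */
}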
{ uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3613,13 +6832,13 @@ __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 >= __p1); return __ret; } #else -__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3630,13 +6849,13 @@ __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 >= __p1); return __ret; } #else -__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3647,13 +6866,13 @@ __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 >= __p1); return __ret; } #else -__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3664,13 +6883,13 @@ __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 >= __p1); return __ret; } #else -__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3681,13 +6900,13 @@ __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 >= __p1); return __ret; } #else -__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t 
vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3698,13 +6917,13 @@ __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 >= __p1); return __ret; } #else -__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3715,13 +6934,13 @@ __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 >= __p1); return __ret; } #else -__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3732,13 +6951,13 @@ __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 >= __p1); return __ret; } #else -__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3749,13 +6968,13 @@ __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 >= __p1); return __ret; } #else -__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3766,13 +6985,13 @@ __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 >= __p1); return __ret; } #else -__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3783,13 +7002,13 @@ __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 >= __p1); return __ret; } #else -__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3800,13 +7019,13 @@ __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 >= __p1); return __ret; } #else -__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3817,13 +7036,13 @@ __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 >= __p1); return __ret; } #else -__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3834,13 +7053,13 @@ __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 > __p1); return __ret; } #else -__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3851,13 +7070,13 @@ __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 > __p1); return __ret; } #else -__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3868,13 +7087,13 @@ __ai uint32x4_t vcgtq_u32(uint32x4_t 
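Every #else (big-endian) body in these hunks follows the same pattern: reverse the input lanes with __builtin_shufflevector, run the operation, then reverse the result back before returning (the result reversal is visible in the wider hunk context of the vcombine definitions near the end of this file's diff). The apparent intent is that lane numbers mean the same thing on either endianness, with the little-endian layout as the contract. A plain-C model of the 3, 2, 1, 0 shuffle used for 4-lane vectors:

#include <stdint.h>

/* What __builtin_shufflevector(v, v, 3, 2, 1, 0) does to a 4-lane
 * vector: output lane i takes input lane (3 - i). */
void reverse_lanes4(uint32_t out[4], const uint32_t in[4]) {
  for (int i = 0; i < 4; i++)
    out[i] = in[3 - i];
}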
__p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else -__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3885,13 +7104,13 @@ __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 > __p1); return __ret; } #else -__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3902,13 +7121,13 @@ __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 > __p1); return __ret; } #else -__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3919,13 +7138,13 @@ __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 > __p1); return __ret; } #else -__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -3936,13 +7155,13 @@ __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else -__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3953,13 +7172,13 @@ __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef 
__LITTLE_ENDIAN__ -__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 > __p1); return __ret; } #else -__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -3970,13 +7189,13 @@ __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else -__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -3987,13 +7206,13 @@ __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else -__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4004,13 +7223,13 @@ __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 > __p1); return __ret; } #else -__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4021,13 +7240,13 @@ __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else -__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4038,13 +7257,13 @@ __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = 
(uint32x2_t)(__p0 > __p1); return __ret; } #else -__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4055,13 +7274,13 @@ __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else -__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4072,13 +7291,13 @@ __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 <= __p1); return __ret; } #else -__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4089,13 +7308,13 @@ __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else -__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4106,13 +7325,13 @@ __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else -__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4123,13 +7342,13 @@ __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 <= __p1); return __ret; } #else -__ai uint8x16_t vcleq_s8(int8x16_t 
__p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4140,13 +7359,13 @@ __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else -__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4157,13 +7376,13 @@ __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else -__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4174,13 +7393,13 @@ __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else -__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4191,13 +7410,13 @@ __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 <= __p1); return __ret; } #else -__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4208,13 +7427,13 @@ __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) 
uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4225,13 +7444,13 @@ __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 <= __p1); return __ret; } #else -__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4242,13 +7461,13 @@ __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 <= __p1); return __ret; } #else -__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4259,13 +7478,13 @@ __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4276,13 +7495,13 @@ __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4293,13 +7512,13 @@ __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 <= __p1); return __ret; } #else -__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); 
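The vcge/vcgt/vcle hunks above (and the vclt ones that follow) each reduce to a single native vector comparison, and their masks compose into branch-free arithmetic. A sketch of a classic use, clamping to [0, 1] with mask-driven selects; vdupq_n_f32 and vbslq_f32 are standard NEON intrinsics assumed here, not part of the diff:

#include <arm_neon.h>

float32x4_t clamp01(float32x4_t x) {
  const float32x4_t lo = vdupq_n_f32(0.0f);
  const float32x4_t hi = vdupq_n_f32(1.0f);
  uint32x4_t below = vcltq_f32(x, lo);  /* x < 0.0f, per lane */
  x = vbslq_f32(below, lo, x);          /* lane-wise select */
  uint32x4_t above = vcgtq_f32(x, hi);  /* x > 1.0f, per lane */
  return vbslq_f32(above, hi, x);
}

(In practice vminq_f32/vmaxq_f32 do this in two instructions; the point here is only the mask-and-select idiom the compare intrinsics enable.)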
@@ -4310,13 +7529,13 @@ __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclsq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vclsq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) { int8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); @@ -4326,13 +7545,13 @@ __ai int8x16_t vclsq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclsq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vclsq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) { int32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); @@ -4342,13 +7561,13 @@ __ai int32x4_t vclsq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclsq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vclsq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) { int16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); @@ -4358,13 +7577,13 @@ __ai int16x8_t vclsq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vclsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); @@ -4374,13 +7593,13 @@ __ai int8x16_t vclsq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclsq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vclsq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); @@ -4390,13 +7609,13 @@ __ai int32x4_t vclsq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vclsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t 
vclsq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); @@ -4406,13 +7625,13 @@ __ai int16x8_t vclsq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcls_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vcls_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) { int8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); @@ -4422,13 +7641,13 @@ __ai int8x8_t vcls_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcls_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcls_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) { int32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); @@ -4438,13 +7657,13 @@ __ai int32x2_t vcls_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcls_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vcls_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) { int16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); @@ -4454,13 +7673,13 @@ __ai int16x4_t vcls_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcls_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vcls_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); @@ -4470,13 +7689,13 @@ __ai int8x8_t vcls_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcls_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcls_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); @@ -4486,13 +7705,13 @@ __ai int32x2_t vcls_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcls_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vcls_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) 
int16x4_t vcls_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); @@ -4502,13 +7721,13 @@ __ai int16x4_t vcls_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 < __p1); return __ret; } #else -__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4519,13 +7738,13 @@ __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else -__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4536,13 +7755,13 @@ __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else -__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4553,13 +7772,13 @@ __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0 < __p1); return __ret; } #else -__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4570,13 +7789,13 @@ __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else -__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t 
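vcls is the least self-explanatory intrinsic in this stretch: per lane, it counts how many bits immediately below the sign bit are copies of it, not counting the sign bit itself. The unsigned variants such as vclsq_u8 simply reinterpret their input as signed, which is why they return the signed vector types seen above. A scalar model of one 32-bit lane, assuming the GCC/clang clz builtin:

#include <stdint.h>

/* Count-leading-sign-bits of one int32 lane, as VCLS computes it.
 * x == 0 and x == -1 both give 31. */
int cls32(int32_t x) {
  uint32_t t = (uint32_t)(x ^ (x >> 1));  /* arithmetic shift on x */
  return t ? __builtin_clz(t) - 1 : 31;   /* clz is undefined at 0 */
}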
vcltq_f32(float32x4_t __p0, float32x4_t __p1) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4587,13 +7806,13 @@ __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else -__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4604,13 +7823,13 @@ __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else -__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4621,13 +7840,13 @@ __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 < __p1); return __ret; } #else -__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4638,13 +7857,13 @@ __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else -__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4655,13 +7874,13 @@ __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else -__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4672,13 +7891,13 @@ __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0 < __p1); return __ret; } #else -__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -4689,13 +7908,13 @@ __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else -__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4706,13 +7925,13 @@ __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else -__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -4723,13 +7942,13 @@ __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else -__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -4740,13 +7959,13 @@ __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); @@ -4756,13 +7975,13 @@ __ai uint8x16_t vclzq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { +__ai 
__attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); @@ -4772,13 +7991,13 @@ __ai uint32x4_t vclzq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); @@ -4788,13 +8007,13 @@ __ai uint16x8_t vclzq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vclzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); @@ -4804,13 +8023,13 @@ __ai int8x16_t vclzq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vclzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); @@ -4820,13 +8039,13 @@ __ai int32x4_t vclzq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vclzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); @@ -4836,13 +8055,13 @@ __ai int16x8_t vclzq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclz_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vclz_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 
16); @@ -4852,13 +8071,13 @@ __ai uint8x8_t vclz_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclz_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vclz_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); @@ -4868,13 +8087,13 @@ __ai uint32x2_t vclz_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclz_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vclz_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); @@ -4884,13 +8103,13 @@ __ai uint16x4_t vclz_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vclz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vclz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); @@ -4900,13 +8119,13 @@ __ai int8x8_t vclz_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vclz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vclz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); @@ -4916,13 +8135,13 @@ __ai int32x2_t vclz_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vclz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vclz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); @@ -4932,13 +8151,13 @@ __ai int16x4_t vclz_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); return __ret; } #else -__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) 
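One pattern worth reading off the vclz/vcnt hunks: the trailing integer passed to the polymorphic __builtin_neon_*_v builtins is a type code, and its encoding is visible right in this diff — 0/1/2 for s8/s16/s32, 4 for poly8, +16 for the unsigned variants (16/17/18), and +32 for the 128-bit q forms (32/33/34 and 48/49/50). The builtin itself always traffics in int8x8_t/int8x16_t bit patterns, as the casts show; the code tells clang which element type to lower for. As for the operation, a scalar model of one vclz_u8 lane, again assuming the GCC/clang clz builtin:

#include <stdint.h>

/* Leading-zero count of one u8 lane, as VCLZ.I8 computes it. */
int clz8(uint8_t x) {
  /* __builtin_clz works on 32-bit values and is undefined for 0,
   * so compensate for the 24 high bits and special-case zero. */
  return x ? __builtin_clz(x) - 24 : 8;
}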
__builtin_neon_vcnt_v((int8x8_t)__rev0, 4); @@ -4948,13 +8167,13 @@ __ai poly8x8_t vcnt_p8(poly8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); return __ret; } #else -__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); @@ -4964,13 +8183,13 @@ __ai poly8x16_t vcntq_p8(poly8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); @@ -4980,13 +8199,13 @@ __ai uint8x16_t vcntq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vcntq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vcntq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); @@ -4996,13 +8215,13 @@ __ai int8x16_t vcntq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); @@ -5012,13 +8231,13 @@ __ai uint8x8_t vcnt_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcnt_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vcnt_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); @@ -5028,13 +8247,13 @@ __ai int8x8_t vcnt_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); 
return __ret; } #else -__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x16_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -5045,13 +8264,13 @@ __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x8_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -5062,13 +8281,13 @@ __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -5076,7 +8295,7 @@ __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; @@ -5084,13 +8303,13 @@ __ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else -__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -5098,7 +8317,7 @@ __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; @@ -5106,13 +8325,13 @@ __ai 
uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else -__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -5121,13 +8340,13 @@ __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -5135,7 +8354,7 @@ __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; @@ -5143,13 +8362,13 @@ __ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -5157,7 +8376,7 @@ __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; @@ -5165,13 +8384,13 @@ __ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else -__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { 
+__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { float32x4_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -5179,7 +8398,7 @@ __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; @@ -5187,13 +8406,13 @@ __ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { float16x8_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -5201,7 +8420,7 @@ __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; @@ -5209,13 +8428,13 @@ __ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else -__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -5223,7 +8442,7 @@ __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; @@ -5231,13 +8450,13 @@ __ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else -__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t 
vcombine_s64(int64x1_t __p0, int64x1_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -5246,13 +8465,13 @@ __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -5260,7 +8479,7 @@ __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; @@ -5340,13 +8559,13 @@ __ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); return __ret; } #else -__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); @@ -5356,13 +8575,13 @@ __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); return __ret; } #else -__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) { float32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); @@ -5372,13 +8591,13 @@ __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); return __ret; } #else -__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) { float32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); @@ -5388,13 +8607,13 @@ __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) { float32x2_t __ret; 
__ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); return __ret; } #else -__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) { float32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); @@ -5548,13 +8767,13 @@ __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); @@ -5564,13 +8783,13 @@ __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); @@ -5580,13 +8799,13 @@ __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); @@ -5596,13 +8815,13 @@ __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); @@ -5612,419 +8831,419 @@ __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \ - poly8x8_t __ret_0; \ - poly8x8_t __s0_0 = __p0_0; \ - __ret_0 = splat_lane_p8(__s0_0, __p1_0); \ - __ret_0; \ -}) -#else -#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \ - poly8x8_t __ret_1; \ - poly8x8_t __s0_1 = __p0_1; \ - poly8x8_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \ - __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 
4, 3, 2, 1, 0); \ - __ret_1; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \ - poly16x4_t __ret_2; \ - poly16x4_t __s0_2 = __p0_2; \ - __ret_2 = splat_lane_p16(__s0_2, __p1_2); \ - __ret_2; \ -}) -#else -#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \ - poly16x4_t __ret_3; \ - poly16x4_t __s0_3 = __p0_3; \ - poly16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \ - __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \ - __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ - __ret_3; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \ - poly8x16_t __ret_4; \ - poly8x8_t __s0_4 = __p0_4; \ - __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \ - __ret_4; \ -}) -#else -#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \ - poly8x16_t __ret_5; \ - poly8x8_t __s0_5 = __p0_5; \ - poly8x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \ - __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_5; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \ - poly16x8_t __ret_6; \ - poly16x4_t __s0_6 = __p0_6; \ - __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \ - __ret_6; \ -}) -#else -#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \ - poly16x8_t __ret_7; \ - poly16x4_t __s0_7 = __p0_7; \ - poly16x4_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \ - __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \ - __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_7; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \ - uint8x16_t __ret_8; \ - uint8x8_t __s0_8 = __p0_8; \ - __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \ +#define vdup_lane_p8(__p0_8, __p1_8) __extension__ ({ \ + poly8x8_t __ret_8; \ + poly8x8_t __s0_8 = __p0_8; \ + __ret_8 = splat_lane_p8(__s0_8, __p1_8); \ __ret_8; \ }) #else -#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \ - uint8x16_t __ret_9; \ - uint8x8_t __s0_9 = __p0_9; \ - uint8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \ - __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdup_lane_p8(__p0_9, __p1_9) __extension__ ({ \ + poly8x8_t __ret_9; \ + poly8x8_t __s0_9 = __p0_9; \ + poly8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9 = __noswap_splat_lane_p8(__rev0_9, __p1_9); \ + __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_9; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \ - uint32x4_t __ret_10; \ - uint32x2_t __s0_10 = __p0_10; \ - __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \ +#define vdup_lane_p16(__p0_10, __p1_10) __extension__ ({ \ + poly16x4_t __ret_10; \ + poly16x4_t __s0_10 = __p0_10; \ + __ret_10 = splat_lane_p16(__s0_10, __p1_10); \ __ret_10; \ }) #else -#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \ - uint32x4_t __ret_11; \ - uint32x2_t __s0_11 = __p0_11; \ - uint32x2_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \ - __ret_11 = 
__noswap_splatq_lane_u32(__rev0_11, __p1_11); \ +#define vdup_lane_p16(__p0_11, __p1_11) __extension__ ({ \ + poly16x4_t __ret_11; \ + poly16x4_t __s0_11 = __p0_11; \ + poly16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \ + __ret_11 = __noswap_splat_lane_p16(__rev0_11, __p1_11); \ __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ __ret_11; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \ - uint64x2_t __ret_12; \ - uint64x1_t __s0_12 = __p0_12; \ - __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \ +#define vdupq_lane_p8(__p0_12, __p1_12) __extension__ ({ \ + poly8x16_t __ret_12; \ + poly8x8_t __s0_12 = __p0_12; \ + __ret_12 = splatq_lane_p8(__s0_12, __p1_12); \ __ret_12; \ }) #else -#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \ - uint64x2_t __ret_13; \ - uint64x1_t __s0_13 = __p0_13; \ - __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \ - __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \ +#define vdupq_lane_p8(__p0_13, __p1_13) __extension__ ({ \ + poly8x16_t __ret_13; \ + poly8x8_t __s0_13 = __p0_13; \ + poly8x8_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_13 = __noswap_splatq_lane_p8(__rev0_13, __p1_13); \ + __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_13; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \ - uint16x8_t __ret_14; \ - uint16x4_t __s0_14 = __p0_14; \ - __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \ +#define vdupq_lane_p16(__p0_14, __p1_14) __extension__ ({ \ + poly16x8_t __ret_14; \ + poly16x4_t __s0_14 = __p0_14; \ + __ret_14 = splatq_lane_p16(__s0_14, __p1_14); \ __ret_14; \ }) #else -#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \ - uint16x8_t __ret_15; \ - uint16x4_t __s0_15 = __p0_15; \ - uint16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ - __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \ +#define vdupq_lane_p16(__p0_15, __p1_15) __extension__ ({ \ + poly16x8_t __ret_15; \ + poly16x4_t __s0_15 = __p0_15; \ + poly16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ + __ret_15 = __noswap_splatq_lane_p16(__rev0_15, __p1_15); \ __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_15; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \ - int8x16_t __ret_16; \ - int8x8_t __s0_16 = __p0_16; \ - __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \ +#define vdupq_lane_u8(__p0_16, __p1_16) __extension__ ({ \ + uint8x16_t __ret_16; \ + uint8x8_t __s0_16 = __p0_16; \ + __ret_16 = splatq_lane_u8(__s0_16, __p1_16); \ __ret_16; \ }) #else -#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \ - int8x16_t __ret_17; \ - int8x8_t __s0_17 = __p0_17; \ - int8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \ +#define vdupq_lane_u8(__p0_17, __p1_17) __extension__ ({ \ + uint8x16_t __ret_17; \ + uint8x8_t __s0_17 = __p0_17; \ + uint8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17 = __noswap_splatq_lane_u8(__rev0_17, __p1_17); \ __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ 
__ret_17; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \ - float32x4_t __ret_18; \ - float32x2_t __s0_18 = __p0_18; \ - __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \ +#define vdupq_lane_u32(__p0_18, __p1_18) __extension__ ({ \ + uint32x4_t __ret_18; \ + uint32x2_t __s0_18 = __p0_18; \ + __ret_18 = splatq_lane_u32(__s0_18, __p1_18); \ __ret_18; \ }) #else -#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \ - float32x4_t __ret_19; \ - float32x2_t __s0_19 = __p0_19; \ - float32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ - __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \ +#define vdupq_lane_u32(__p0_19, __p1_19) __extension__ ({ \ + uint32x4_t __ret_19; \ + uint32x2_t __s0_19 = __p0_19; \ + uint32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ + __ret_19 = __noswap_splatq_lane_u32(__rev0_19, __p1_19); \ __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \ __ret_19; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f16(__p0_20, __p1_20) __extension__ ({ \ - float16x8_t __ret_20; \ - float16x4_t __s0_20 = __p0_20; \ - __ret_20 = splatq_lane_f16(__s0_20, __p1_20); \ +#define vdupq_lane_u64(__p0_20, __p1_20) __extension__ ({ \ + uint64x2_t __ret_20; \ + uint64x1_t __s0_20 = __p0_20; \ + __ret_20 = splatq_lane_u64(__s0_20, __p1_20); \ __ret_20; \ }) #else -#define vdupq_lane_f16(__p0_21, __p1_21) __extension__ ({ \ - float16x8_t __ret_21; \ - float16x4_t __s0_21 = __p0_21; \ - float16x4_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 3, 2, 1, 0); \ - __ret_21 = __noswap_splatq_lane_f16(__rev0_21, __p1_21); \ - __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdupq_lane_u64(__p0_21, __p1_21) __extension__ ({ \ + uint64x2_t __ret_21; \ + uint64x1_t __s0_21 = __p0_21; \ + __ret_21 = __noswap_splatq_lane_u64(__s0_21, __p1_21); \ + __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \ __ret_21; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s32(__p0_22, __p1_22) __extension__ ({ \ - int32x4_t __ret_22; \ - int32x2_t __s0_22 = __p0_22; \ - __ret_22 = splatq_lane_s32(__s0_22, __p1_22); \ +#define vdupq_lane_u16(__p0_22, __p1_22) __extension__ ({ \ + uint16x8_t __ret_22; \ + uint16x4_t __s0_22 = __p0_22; \ + __ret_22 = splatq_lane_u16(__s0_22, __p1_22); \ __ret_22; \ }) #else -#define vdupq_lane_s32(__p0_23, __p1_23) __extension__ ({ \ - int32x4_t __ret_23; \ - int32x2_t __s0_23 = __p0_23; \ - int32x2_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 1, 0); \ - __ret_23 = __noswap_splatq_lane_s32(__rev0_23, __p1_23); \ - __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \ +#define vdupq_lane_u16(__p0_23, __p1_23) __extension__ ({ \ + uint16x8_t __ret_23; \ + uint16x4_t __s0_23 = __p0_23; \ + uint16x4_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 3, 2, 1, 0); \ + __ret_23 = __noswap_splatq_lane_u16(__rev0_23, __p1_23); \ + __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_23; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s64(__p0_24, __p1_24) __extension__ ({ \ - int64x2_t __ret_24; \ - int64x1_t __s0_24 = __p0_24; \ - __ret_24 = splatq_lane_s64(__s0_24, __p1_24); \ +#define vdupq_lane_s8(__p0_24, __p1_24) __extension__ ({ \ + int8x16_t __ret_24; \ + int8x8_t __s0_24 = __p0_24; \ + __ret_24 = splatq_lane_s8(__s0_24, __p1_24); \ __ret_24; \ }) #else -#define 
vdupq_lane_s64(__p0_25, __p1_25) __extension__ ({ \ - int64x2_t __ret_25; \ - int64x1_t __s0_25 = __p0_25; \ - __ret_25 = __noswap_splatq_lane_s64(__s0_25, __p1_25); \ - __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \ +#define vdupq_lane_s8(__p0_25, __p1_25) __extension__ ({ \ + int8x16_t __ret_25; \ + int8x8_t __s0_25 = __p0_25; \ + int8x8_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_25 = __noswap_splatq_lane_s8(__rev0_25, __p1_25); \ + __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_25; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s16(__p0_26, __p1_26) __extension__ ({ \ - int16x8_t __ret_26; \ - int16x4_t __s0_26 = __p0_26; \ - __ret_26 = splatq_lane_s16(__s0_26, __p1_26); \ +#define vdupq_lane_f32(__p0_26, __p1_26) __extension__ ({ \ + float32x4_t __ret_26; \ + float32x2_t __s0_26 = __p0_26; \ + __ret_26 = splatq_lane_f32(__s0_26, __p1_26); \ __ret_26; \ }) #else -#define vdupq_lane_s16(__p0_27, __p1_27) __extension__ ({ \ - int16x8_t __ret_27; \ - int16x4_t __s0_27 = __p0_27; \ - int16x4_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \ - __ret_27 = __noswap_splatq_lane_s16(__rev0_27, __p1_27); \ - __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdupq_lane_f32(__p0_27, __p1_27) __extension__ ({ \ + float32x4_t __ret_27; \ + float32x2_t __s0_27 = __p0_27; \ + float32x2_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 1, 0); \ + __ret_27 = __noswap_splatq_lane_f32(__rev0_27, __p1_27); \ + __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \ __ret_27; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u8(__p0_28, __p1_28) __extension__ ({ \ - uint8x8_t __ret_28; \ - uint8x8_t __s0_28 = __p0_28; \ - __ret_28 = splat_lane_u8(__s0_28, __p1_28); \ +#define vdupq_lane_f16(__p0_28, __p1_28) __extension__ ({ \ + float16x8_t __ret_28; \ + float16x4_t __s0_28 = __p0_28; \ + __ret_28 = splatq_lane_f16(__s0_28, __p1_28); \ __ret_28; \ }) #else -#define vdup_lane_u8(__p0_29, __p1_29) __extension__ ({ \ - uint8x8_t __ret_29; \ - uint8x8_t __s0_29 = __p0_29; \ - uint8x8_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_29 = __noswap_splat_lane_u8(__rev0_29, __p1_29); \ +#define vdupq_lane_f16(__p0_29, __p1_29) __extension__ ({ \ + float16x8_t __ret_29; \ + float16x4_t __s0_29 = __p0_29; \ + float16x4_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \ + __ret_29 = __noswap_splatq_lane_f16(__rev0_29, __p1_29); \ __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_29; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u32(__p0_30, __p1_30) __extension__ ({ \ - uint32x2_t __ret_30; \ - uint32x2_t __s0_30 = __p0_30; \ - __ret_30 = splat_lane_u32(__s0_30, __p1_30); \ +#define vdupq_lane_s32(__p0_30, __p1_30) __extension__ ({ \ + int32x4_t __ret_30; \ + int32x2_t __s0_30 = __p0_30; \ + __ret_30 = splatq_lane_s32(__s0_30, __p1_30); \ __ret_30; \ }) #else -#define vdup_lane_u32(__p0_31, __p1_31) __extension__ ({ \ - uint32x2_t __ret_31; \ - uint32x2_t __s0_31 = __p0_31; \ - uint32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ - __ret_31 = __noswap_splat_lane_u32(__rev0_31, __p1_31); \ - __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \ +#define vdupq_lane_s32(__p0_31, __p1_31) 
__extension__ ({ \ + int32x4_t __ret_31; \ + int32x2_t __s0_31 = __p0_31; \ + int32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ + __ret_31 = __noswap_splatq_lane_s32(__rev0_31, __p1_31); \ + __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 3, 2, 1, 0); \ __ret_31; \ }) #endif -#define vdup_lane_u64(__p0_32, __p1_32) __extension__ ({ \ - uint64x1_t __ret_32; \ - uint64x1_t __s0_32 = __p0_32; \ - __ret_32 = splat_lane_u64(__s0_32, __p1_32); \ +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s64(__p0_32, __p1_32) __extension__ ({ \ + int64x2_t __ret_32; \ + int64x1_t __s0_32 = __p0_32; \ + __ret_32 = splatq_lane_s64(__s0_32, __p1_32); \ __ret_32; \ }) -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u16(__p0_33, __p1_33) __extension__ ({ \ - uint16x4_t __ret_33; \ - uint16x4_t __s0_33 = __p0_33; \ - __ret_33 = splat_lane_u16(__s0_33, __p1_33); \ +#else +#define vdupq_lane_s64(__p0_33, __p1_33) __extension__ ({ \ + int64x2_t __ret_33; \ + int64x1_t __s0_33 = __p0_33; \ + __ret_33 = __noswap_splatq_lane_s64(__s0_33, __p1_33); \ + __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 1, 0); \ __ret_33; \ }) -#else -#define vdup_lane_u16(__p0_34, __p1_34) __extension__ ({ \ - uint16x4_t __ret_34; \ - uint16x4_t __s0_34 = __p0_34; \ - uint16x4_t __rev0_34; __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 3, 2, 1, 0); \ - __ret_34 = __noswap_splat_lane_u16(__rev0_34, __p1_34); \ - __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s16(__p0_34, __p1_34) __extension__ ({ \ + int16x8_t __ret_34; \ + int16x4_t __s0_34 = __p0_34; \ + __ret_34 = splatq_lane_s16(__s0_34, __p1_34); \ __ret_34; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s8(__p0_35, __p1_35) __extension__ ({ \ - int8x8_t __ret_35; \ - int8x8_t __s0_35 = __p0_35; \ - __ret_35 = splat_lane_s8(__s0_35, __p1_35); \ +#else +#define vdupq_lane_s16(__p0_35, __p1_35) __extension__ ({ \ + int16x8_t __ret_35; \ + int16x4_t __s0_35 = __p0_35; \ + int16x4_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 3, 2, 1, 0); \ + __ret_35 = __noswap_splatq_lane_s16(__rev0_35, __p1_35); \ + __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_35; \ }) -#else -#define vdup_lane_s8(__p0_36, __p1_36) __extension__ ({ \ - int8x8_t __ret_36; \ - int8x8_t __s0_36 = __p0_36; \ - int8x8_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_36 = __noswap_splat_lane_s8(__rev0_36, __p1_36); \ - __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u8(__p0_36, __p1_36) __extension__ ({ \ + uint8x8_t __ret_36; \ + uint8x8_t __s0_36 = __p0_36; \ + __ret_36 = splat_lane_u8(__s0_36, __p1_36); \ __ret_36; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f32(__p0_37, __p1_37) __extension__ ({ \ - float32x2_t __ret_37; \ - float32x2_t __s0_37 = __p0_37; \ - __ret_37 = splat_lane_f32(__s0_37, __p1_37); \ +#else +#define vdup_lane_u8(__p0_37, __p1_37) __extension__ ({ \ + uint8x8_t __ret_37; \ + uint8x8_t __s0_37 = __p0_37; \ + uint8x8_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_37 = __noswap_splat_lane_u8(__rev0_37, __p1_37); \ + __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_37; \ }) -#else -#define vdup_lane_f32(__p0_38, __p1_38) __extension__ ({ \ - float32x2_t 
__ret_38; \ - float32x2_t __s0_38 = __p0_38; \ - float32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ - __ret_38 = __noswap_splat_lane_f32(__rev0_38, __p1_38); \ - __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u32(__p0_38, __p1_38) __extension__ ({ \ + uint32x2_t __ret_38; \ + uint32x2_t __s0_38 = __p0_38; \ + __ret_38 = splat_lane_u32(__s0_38, __p1_38); \ __ret_38; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f16(__p0_39, __p1_39) __extension__ ({ \ - float16x4_t __ret_39; \ - float16x4_t __s0_39 = __p0_39; \ - __ret_39 = splat_lane_f16(__s0_39, __p1_39); \ +#else +#define vdup_lane_u32(__p0_39, __p1_39) __extension__ ({ \ + uint32x2_t __ret_39; \ + uint32x2_t __s0_39 = __p0_39; \ + uint32x2_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 1, 0); \ + __ret_39 = __noswap_splat_lane_u32(__rev0_39, __p1_39); \ + __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 1, 0); \ __ret_39; \ }) -#else -#define vdup_lane_f16(__p0_40, __p1_40) __extension__ ({ \ - float16x4_t __ret_40; \ - float16x4_t __s0_40 = __p0_40; \ - float16x4_t __rev0_40; __rev0_40 = __builtin_shufflevector(__s0_40, __s0_40, 3, 2, 1, 0); \ - __ret_40 = __noswap_splat_lane_f16(__rev0_40, __p1_40); \ - __ret_40 = __builtin_shufflevector(__ret_40, __ret_40, 3, 2, 1, 0); \ - __ret_40; \ -}) #endif +#define vdup_lane_u64(__p0_40, __p1_40) __extension__ ({ \ + uint64x1_t __ret_40; \ + uint64x1_t __s0_40 = __p0_40; \ + __ret_40 = splat_lane_u64(__s0_40, __p1_40); \ + __ret_40; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s32(__p0_41, __p1_41) __extension__ ({ \ - int32x2_t __ret_41; \ - int32x2_t __s0_41 = __p0_41; \ - __ret_41 = splat_lane_s32(__s0_41, __p1_41); \ +#define vdup_lane_u16(__p0_41, __p1_41) __extension__ ({ \ + uint16x4_t __ret_41; \ + uint16x4_t __s0_41 = __p0_41; \ + __ret_41 = splat_lane_u16(__s0_41, __p1_41); \ __ret_41; \ }) #else -#define vdup_lane_s32(__p0_42, __p1_42) __extension__ ({ \ - int32x2_t __ret_42; \ - int32x2_t __s0_42 = __p0_42; \ - int32x2_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 1, 0); \ - __ret_42 = __noswap_splat_lane_s32(__rev0_42, __p1_42); \ - __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 1, 0); \ +#define vdup_lane_u16(__p0_42, __p1_42) __extension__ ({ \ + uint16x4_t __ret_42; \ + uint16x4_t __s0_42 = __p0_42; \ + uint16x4_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 3, 2, 1, 0); \ + __ret_42 = __noswap_splat_lane_u16(__rev0_42, __p1_42); \ + __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 3, 2, 1, 0); \ __ret_42; \ }) #endif -#define vdup_lane_s64(__p0_43, __p1_43) __extension__ ({ \ - int64x1_t __ret_43; \ - int64x1_t __s0_43 = __p0_43; \ - __ret_43 = splat_lane_s64(__s0_43, __p1_43); \ +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s8(__p0_43, __p1_43) __extension__ ({ \ + int8x8_t __ret_43; \ + int8x8_t __s0_43 = __p0_43; \ + __ret_43 = splat_lane_s8(__s0_43, __p1_43); \ __ret_43; \ }) -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s16(__p0_44, __p1_44) __extension__ ({ \ - int16x4_t __ret_44; \ - int16x4_t __s0_44 = __p0_44; \ - __ret_44 = splat_lane_s16(__s0_44, __p1_44); \ +#else +#define vdup_lane_s8(__p0_44, __p1_44) __extension__ ({ \ + int8x8_t __ret_44; \ + int8x8_t __s0_44 = __p0_44; \ + int8x8_t __rev0_44; __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_44 = __noswap_splat_lane_s8(__rev0_44, __p1_44); \ + __ret_44 = 
__builtin_shufflevector(__ret_44, __ret_44, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_44; \ }) -#else -#define vdup_lane_s16(__p0_45, __p1_45) __extension__ ({ \ - int16x4_t __ret_45; \ - int16x4_t __s0_45 = __p0_45; \ - int16x4_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \ - __ret_45 = __noswap_splat_lane_s16(__rev0_45, __p1_45); \ - __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f32(__p0_45, __p1_45) __extension__ ({ \ + float32x2_t __ret_45; \ + float32x2_t __s0_45 = __p0_45; \ + __ret_45 = splat_lane_f32(__s0_45, __p1_45); \ __ret_45; \ }) +#else +#define vdup_lane_f32(__p0_46, __p1_46) __extension__ ({ \ + float32x2_t __ret_46; \ + float32x2_t __s0_46 = __p0_46; \ + float32x2_t __rev0_46; __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 1, 0); \ + __ret_46 = __noswap_splat_lane_f32(__rev0_46, __p1_46); \ + __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 1, 0); \ + __ret_46; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vdup_n_p8(poly8_t __p0) { +#define vdup_lane_f16(__p0_47, __p1_47) __extension__ ({ \ + float16x4_t __ret_47; \ + float16x4_t __s0_47 = __p0_47; \ + __ret_47 = splat_lane_f16(__s0_47, __p1_47); \ + __ret_47; \ +}) +#else +#define vdup_lane_f16(__p0_48, __p1_48) __extension__ ({ \ + float16x4_t __ret_48; \ + float16x4_t __s0_48 = __p0_48; \ + float16x4_t __rev0_48; __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 3, 2, 1, 0); \ + __ret_48 = __noswap_splat_lane_f16(__rev0_48, __p1_48); \ + __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 3, 2, 1, 0); \ + __ret_48; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s32(__p0_49, __p1_49) __extension__ ({ \ + int32x2_t __ret_49; \ + int32x2_t __s0_49 = __p0_49; \ + __ret_49 = splat_lane_s32(__s0_49, __p1_49); \ + __ret_49; \ +}) +#else +#define vdup_lane_s32(__p0_50, __p1_50) __extension__ ({ \ + int32x2_t __ret_50; \ + int32x2_t __s0_50 = __p0_50; \ + int32x2_t __rev0_50; __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \ + __ret_50 = __noswap_splat_lane_s32(__rev0_50, __p1_50); \ + __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \ + __ret_50; \ +}) +#endif + +#define vdup_lane_s64(__p0_51, __p1_51) __extension__ ({ \ + int64x1_t __ret_51; \ + int64x1_t __s0_51 = __p0_51; \ + __ret_51 = splat_lane_s64(__s0_51, __p1_51); \ + __ret_51; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s16(__p0_52, __p1_52) __extension__ ({ \ + int16x4_t __ret_52; \ + int16x4_t __s0_52 = __p0_52; \ + __ret_52 = splat_lane_s16(__s0_52, __p1_52); \ + __ret_52; \ +}) +#else +#define vdup_lane_s16(__p0_53, __p1_53) __extension__ ({ \ + int16x4_t __ret_53; \ + int16x4_t __s0_53 = __p0_53; \ + int16x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ + __ret_53 = __noswap_splat_lane_s16(__rev0_53, __p1_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ + __ret_53; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly8x8_t vdup_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6033,13 +9252,13 @@ __ai poly8x8_t vdup_n_p8(poly8_t __p0) { 
#endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vdup_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly16x4_t vdup_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -6048,13 +9267,13 @@ __ai poly16x4_t vdup_n_p16(poly16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6063,13 +9282,13 @@ __ai poly8x16_t vdupq_n_p8(poly8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6078,13 +9297,13 @@ __ai poly16x8_t vdupq_n_p16(poly16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6093,13 +9312,13 @@ __ai uint8x16_t vdupq_n_u8(uint8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -6108,13 +9327,13 @@ __ai uint32x4_t vdupq_n_u32(uint32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; return __ret; } #else -__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t 
__p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -6123,13 +9342,13 @@ __ai uint64x2_t vdupq_n_u64(uint64_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6138,13 +9357,13 @@ __ai uint16x8_t vdupq_n_u16(uint16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vdupq_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int8x16_t vdupq_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6153,13 +9372,13 @@ __ai int8x16_t vdupq_n_s8(int8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vdupq_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai float32x4_t vdupq_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -6185,13 +9404,13 @@ __ai float32x4_t vdupq_n_f32(float32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vdupq_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int32x4_t vdupq_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -6200,13 +9419,13 @@ __ai int32x4_t vdupq_n_s32(int32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vdupq_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; return __ret; } #else -__ai int64x2_t vdupq_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -6215,13 +9434,13 @@ __ai int64x2_t vdupq_n_s64(int64_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vdupq_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x8_t vdupq_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) { int16x8_t 
__ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6230,13 +9449,13 @@ __ai int16x8_t vdupq_n_s16(int16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vdup_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint8x8_t vdup_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6245,13 +9464,13 @@ __ai uint8x8_t vdup_n_u8(uint8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vdup_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; return __ret; } #else -__ai uint32x2_t vdup_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -6259,19 +9478,19 @@ __ai uint32x2_t vdup_n_u32(uint32_t __p0) { } #endif -__ai uint64x1_t vdup_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vdup_n_u64(uint64_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vdup_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint16x4_t vdup_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -6280,13 +9499,13 @@ __ai uint16x4_t vdup_n_u16(uint16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vdup_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int8x8_t vdup_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6295,13 +9514,13 @@ __ai int8x8_t vdup_n_s8(int8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vdup_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; return __ret; } #else -__ai float32x2_t vdup_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -6327,13 +9546,13 @@ __ai float32x2_t vdup_n_f32(float32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vdup_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; return __ret; } #else -__ai int32x2_t vdup_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) { int32x2_t 
__ret; __ret = (int32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -6341,19 +9560,19 @@ __ai int32x2_t vdup_n_s32(int32_t __p0) { } #endif -__ai int64x1_t vdup_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vdup_n_s64(int64_t __p0) { int64x1_t __ret; __ret = (int64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vdup_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x4_t vdup_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -6362,13 +9581,13 @@ __ai int16x4_t vdup_n_s16(int16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6379,13 +9598,13 @@ __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -6396,13 +9615,13 @@ __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -6413,13 +9632,13 @@ __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); 
@@ -6430,13 +9649,13 @@ __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6447,13 +9666,13 @@ __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -6464,13 +9683,13 @@ __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -6481,13 +9700,13 @@ __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6498,13 +9717,13 @@ __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6515,13 +9734,13 @@ __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai 
__attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -6531,19 +9750,19 @@ __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 ^ __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -6554,13 +9773,13 @@ __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -6571,13 +9790,13 @@ __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -6587,19 +9806,19 @@ __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 ^ __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 ^ __p1; return __ret; } #else -__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -7044,20 +10263,62 @@ __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { 
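/* The hunk that follows adds vextq_f16/vext_f16 as macros rather than __ai
 * functions: the extraction index __p2 must remain an integer constant
 * expression when it reaches __builtin_neon_vext_v, and a macro keeps it one.
 * A hedged usage sketch, assuming an ARM target with NEON and FP16 storage
 * support; rotate_one_lane is a hypothetical helper, not header content: */
#include <arm_neon.h>

static inline float16x4_t rotate_one_lane(float16x4_t v) {
  /* vext_f16(v, v, 1) concatenates v:v and takes four lanes starting at
   * index 1, i.e. {v[1], v[2], v[3], v[0]}. */
  return vext_f16(v, v, 1);
}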
#endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) { poly8x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; @@ -7065,13 +10326,13 @@ __ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else -__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) { poly16x4_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); @@ -7081,20 +10342,20 @@ __ai poly16x4_t vget_high_p16(poly16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { 
+__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) { uint8x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; @@ -7102,20 +10363,20 @@ __ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #else -__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) { uint32x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; @@ -7123,13 +10384,13 @@ __ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) { uint64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else -__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) { uint64x1_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); @@ -7138,20 +10399,20 @@ __ai uint64x1_t vget_high_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else -__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) { uint16x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; @@ -7159,20 +10420,20 @@ __ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vget_high_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai int8x8_t vget_high_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) 
int8x8_t vget_high_s8(int8x16_t __p0) { int8x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; @@ -7180,20 +10441,20 @@ __ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vget_high_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #else -__ai float32x2_t vget_high_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) { float32x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; @@ -7201,20 +10462,20 @@ __ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vget_high_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else -__ai float16x4_t vget_high_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) { float16x4_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; @@ -7222,20 +10483,20 @@ __ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vget_high_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; } #else -__ai int32x2_t vget_high_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) { int32x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 2, 3); return __ret; @@ -7243,13 +10504,13 @@ __ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t 
vget_high_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) { int64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else -__ai int64x1_t vget_high_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) { int64x1_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); @@ -7258,20 +10519,20 @@ __ai int64x1_t vget_high_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vget_high_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; } #else -__ai int16x4_t vget_high_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) { int16x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); return __ret; @@ -7751,13 +11012,13 @@ __ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) { poly8x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); @@ -7767,13 +11028,13 @@ __ai poly8x8_t vget_low_p8(poly8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else -__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) { poly16x4_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); @@ -7783,13 +11044,13 @@ __ai poly16x4_t vget_low_p16(poly16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) { uint8x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); @@ -7799,13 +11060,13 @@ __ai uint8x8_t vget_low_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { +__ai 
__attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1); return __ret; } #else -__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) { uint32x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); @@ -7815,13 +11076,13 @@ __ai uint32x2_t vget_low_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) { uint64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else -__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) { uint64x1_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); @@ -7830,13 +11091,13 @@ __ai uint64x1_t vget_low_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else -__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) { uint16x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); @@ -7846,13 +11107,13 @@ __ai uint16x4_t vget_low_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vget_low_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai int8x8_t vget_low_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) { int8x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); @@ -7862,13 +11123,13 @@ __ai int8x8_t vget_low_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vget_low_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1); return __ret; } #else -__ai float32x2_t vget_low_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) { float32x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); @@ -7878,13 +11139,13 @@ __ai float32x2_t vget_low_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vget_low_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else -__ai float16x4_t vget_low_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) { float16x4_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = 
__builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); @@ -7894,13 +11155,13 @@ __ai float16x4_t vget_low_f16(float16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vget_low_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1); return __ret; } #else -__ai int32x2_t vget_low_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) { int32x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); @@ -7910,13 +11171,13 @@ __ai int32x2_t vget_low_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vget_low_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) { int64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else -__ai int64x1_t vget_low_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) { int64x1_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); @@ -7925,13 +11186,13 @@ __ai int64x1_t vget_low_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vget_low_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); return __ret; } #else -__ai int16x4_t vget_low_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) { int16x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); @@ -7941,13 +11202,13 @@ __ai int16x4_t vget_low_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -7958,13 +11219,13 @@ __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -7975,13 +11236,13 @@ __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, 
uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -7992,13 +11253,13 @@ __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8009,13 +11270,13 @@ __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -8026,13 +11287,13 @@ __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8043,13 +11304,13 @@ __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
7, 6, 5, 4, 3, 2, 1, 0); @@ -8060,13 +11321,13 @@ __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -8077,13 +11338,13 @@ __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -8094,13 +11355,13 @@ __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8111,13 +11372,13 @@ __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -8128,13 +11389,13 @@ __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -8145,13 +11406,13 @@ __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8162,13 +11423,13 @@ __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -8179,13 +11440,13 @@ __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8196,13 +11457,13 @@ __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8213,13 +11474,13 @@ __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai 
int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -8230,13 +11491,13 @@ __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8247,13 +11508,13 @@ __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8264,13 +11525,13 @@ __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -8281,13 +11542,13 @@ __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -8298,13 +11559,13 @@ __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) 
__builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -8315,13 +11576,13 @@ __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -8332,13 +11593,13 @@ __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13643,13 +16904,13 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13660,13 +16921,13 @@ __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13677,13 +16938,13 @@ __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai 
__attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13694,13 +16955,13 @@ __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13711,13 +16972,13 @@ __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13728,13 +16989,13 @@ __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13745,13 +17006,13 @@ __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 
0); @@ -13762,13 +17023,13 @@ __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13779,13 +17040,13 @@ __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -13796,13 +17057,13 @@ __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13813,13 +17074,13 @@ __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13830,13 +17091,13 @@ __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); @@ -13847,13 +17108,13 @@ __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -13864,13 +17125,13 @@ __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13881,13 +17142,13 @@ __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13898,13 +17159,13 @@ __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13915,13 +17176,13 @@ __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; 
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13932,13 +17193,13 @@ __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -13949,13 +17210,13 @@ __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13966,13 +17227,13 @@ __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -13983,13 +17244,13 @@ __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14000,13 +17261,13 @@ __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else 
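/* This #else branch, like every big-endian variant in the section, follows
 * one pattern: reverse the input lanes with __builtin_shufflevector, run the
 * little-endian operation, then reverse the result back so lane numbering
 * matches the architectural order. A minimal sketch of just the reversal
 * step, assuming Clang vector-extension semantics; rev_u8x8 is a
 * hypothetical name: */
#include <arm_neon.h>

static inline uint8x8_t rev_u8x8(uint8x8_t v) {
  /* indices 7..0 swap the eight lanes end-for-end */
  return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
}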
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14017,13 +17278,13 @@ __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14034,13 +17295,13 @@ __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14051,13 +17312,13 @@ __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14068,13 +17329,13 @@ __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14085,13 +17346,13 @@ __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, 
(int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14102,13 +17363,13 @@ __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14119,13 +17380,13 @@ __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14137,13 +17398,13 @@ __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14155,13 +17416,13 @@ __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14173,13 +17434,13 @@ __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ 
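/* [Editor's illustration; not part of the patch.] Two idioms recur in the
   vmla_* / vmls_* hunks that follow. First, every intrinsic is defined twice
   because the __builtin_neon_* builtins number lanes in little-endian order:
   the #else (big-endian) bodies reverse the lanes with __builtin_shufflevector
   on entry and again on return. Second, the multiply-accumulate bodies contain
   no builtin at all, just __p0 + __p1 * __p2 (or - for vmls), which the
   backend may fuse back into a single multiply-accumulate instruction. Both
   idioms reduced to a standalone sketch (hypothetical names; assumes 32-bit
   int and clang's vector extensions): */
typedef int v2si_rev_demo __attribute__((vector_size(8)));

static inline v2si_rev_demo demo_reverse_lanes(v2si_rev_demo v) {
    return __builtin_shufflevector(v, v, 1, 0); // constant indices 1, 0 swap the two lanes
}

static inline v2si_rev_demo demo_mla(v2si_rev_demo acc, v2si_rev_demo a, v2si_rev_demo b) {
    return acc + a * b; // plain C multiply-add; candidate for a NEON MLA
}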
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14191,13 +17452,13 @@ __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14209,13 +17470,13 @@ __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14227,13 +17488,13 @@ __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14245,13 +17506,13 @@ __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14263,13 +17524,13 @@ __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14281,13 +17542,13 @@ __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14299,13 +17560,13 @@ __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14317,13 +17578,13 @@ __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14335,13 +17596,13 @@ __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int32x2_t 
vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14353,13 +17614,13 @@ __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14371,253 +17632,253 @@ __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ - uint32x4_t __ret_46; \ - uint32x4_t __s0_46 = __p0_46; \ - uint32x4_t __s1_46 = __p1_46; \ - uint32x2_t __s2_46 = __p2_46; \ - __ret_46 = __s0_46 + __s1_46 * splatq_lane_u32(__s2_46, __p3_46); \ - __ret_46; \ -}) -#else -#define vmlaq_lane_u32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ - uint32x4_t __ret_47; \ - uint32x4_t __s0_47 = __p0_47; \ - uint32x4_t __s1_47 = __p1_47; \ - uint32x2_t __s2_47 = __p2_47; \ - uint32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ - uint32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ - uint32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ - __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_u32(__rev2_47, __p3_47); \ - __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \ - __ret_47; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ - uint16x8_t __ret_48; \ - uint16x8_t __s0_48 = __p0_48; \ - uint16x8_t __s1_48 = __p1_48; \ - uint16x4_t __s2_48 = __p2_48; \ - __ret_48 = __s0_48 + __s1_48 * splatq_lane_u16(__s2_48, __p3_48); \ - __ret_48; \ -}) -#else -#define vmlaq_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ - uint16x8_t __ret_49; \ - uint16x8_t __s0_49 = __p0_49; \ - uint16x8_t __s1_49 = __p1_49; \ - uint16x4_t __s2_49 = __p2_49; \ - uint16x8_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \ - __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_u16(__rev2_49, __p3_49); \ - __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_49; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_f32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ - float32x4_t __ret_50; \ - float32x4_t __s0_50 = __p0_50; \ - float32x4_t __s1_50 = __p1_50; \ - float32x2_t __s2_50 = __p2_50; \ - __ret_50 = __s0_50 + __s1_50 * splatq_lane_f32(__s2_50, __p3_50); \ - __ret_50; \ -}) -#else -#define vmlaq_lane_f32(__p0_51, 
__p1_51, __p2_51, __p3_51) __extension__ ({ \ - float32x4_t __ret_51; \ - float32x4_t __s0_51 = __p0_51; \ - float32x4_t __s1_51 = __p1_51; \ - float32x2_t __s2_51 = __p2_51; \ - float32x4_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \ - float32x4_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 3, 2, 1, 0); \ - float32x2_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \ - __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_f32(__rev2_51, __p3_51); \ - __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \ - __ret_51; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ - int32x4_t __ret_52; \ - int32x4_t __s0_52 = __p0_52; \ - int32x4_t __s1_52 = __p1_52; \ - int32x2_t __s2_52 = __p2_52; \ - __ret_52 = __s0_52 + __s1_52 * splatq_lane_s32(__s2_52, __p3_52); \ - __ret_52; \ -}) -#else -#define vmlaq_lane_s32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ - int32x4_t __ret_53; \ - int32x4_t __s0_53 = __p0_53; \ - int32x4_t __s1_53 = __p1_53; \ - int32x2_t __s2_53 = __p2_53; \ - int32x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ - int32x4_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 3, 2, 1, 0); \ - int32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ - __ret_53 = __rev0_53 + __rev1_53 * __noswap_splatq_lane_s32(__rev2_53, __p3_53); \ - __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ - __ret_53; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ - int16x8_t __ret_54; \ - int16x8_t __s0_54 = __p0_54; \ - int16x8_t __s1_54 = __p1_54; \ - int16x4_t __s2_54 = __p2_54; \ - __ret_54 = __s0_54 + __s1_54 * splatq_lane_s16(__s2_54, __p3_54); \ +#define vmlaq_lane_u32(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + uint32x4_t __ret_54; \ + uint32x4_t __s0_54 = __p0_54; \ + uint32x4_t __s1_54 = __p1_54; \ + uint32x2_t __s2_54 = __p2_54; \ + __ret_54 = __s0_54 + __s1_54 * splatq_lane_u32(__s2_54, __p3_54); \ __ret_54; \ }) #else -#define vmlaq_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ - int16x8_t __ret_55; \ - int16x8_t __s0_55 = __p0_55; \ - int16x8_t __s1_55 = __p1_55; \ - int16x4_t __s2_55 = __p2_55; \ - int16x8_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ - __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_s16(__rev2_55, __p3_55); \ - __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vmlaq_lane_u32(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + uint32x4_t __ret_55; \ + uint32x4_t __s0_55 = __p0_55; \ + uint32x4_t __s1_55 = __p1_55; \ + uint32x2_t __s2_55 = __p2_55; \ + uint32x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \ + uint32x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \ + uint32x2_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 1, 0); \ + __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_u32(__rev2_55, __p3_55); \ + __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \ __ret_55; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vmla_lane_u32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ - uint32x2_t __ret_56; \ - uint32x2_t __s0_56 = __p0_56; \ - uint32x2_t __s1_56 = __p1_56; \ - uint32x2_t __s2_56 = __p2_56; \ - __ret_56 = __s0_56 + __s1_56 * splat_lane_u32(__s2_56, __p3_56); \ +#define vmlaq_lane_u16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + uint16x8_t __ret_56; \ + uint16x8_t __s0_56 = __p0_56; \ + uint16x8_t __s1_56 = __p1_56; \ + uint16x4_t __s2_56 = __p2_56; \ + __ret_56 = __s0_56 + __s1_56 * splatq_lane_u16(__s2_56, __p3_56); \ __ret_56; \ }) #else -#define vmla_lane_u32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ - uint32x2_t __ret_57; \ - uint32x2_t __s0_57 = __p0_57; \ - uint32x2_t __s1_57 = __p1_57; \ - uint32x2_t __s2_57 = __p2_57; \ - uint32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ - uint32x2_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ - uint32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ - __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_u32(__rev2_57, __p3_57); \ - __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \ +#define vmlaq_lane_u16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + uint16x8_t __ret_57; \ + uint16x8_t __s0_57 = __p0_57; \ + uint16x8_t __s1_57 = __p1_57; \ + uint16x4_t __s2_57 = __p2_57; \ + uint16x8_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 3, 2, 1, 0); \ + __ret_57 = __rev0_57 + __rev1_57 * __noswap_splatq_lane_u16(__rev2_57, __p3_57); \ + __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_57; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ - uint16x4_t __ret_58; \ - uint16x4_t __s0_58 = __p0_58; \ - uint16x4_t __s1_58 = __p1_58; \ - uint16x4_t __s2_58 = __p2_58; \ - __ret_58 = __s0_58 + __s1_58 * splat_lane_u16(__s2_58, __p3_58); \ +#define vmlaq_lane_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + float32x4_t __ret_58; \ + float32x4_t __s0_58 = __p0_58; \ + float32x4_t __s1_58 = __p1_58; \ + float32x2_t __s2_58 = __p2_58; \ + __ret_58 = __s0_58 + __s1_58 * splatq_lane_f32(__s2_58, __p3_58); \ __ret_58; \ }) #else -#define vmla_lane_u16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ - uint16x4_t __ret_59; \ - uint16x4_t __s0_59 = __p0_59; \ - uint16x4_t __s1_59 = __p1_59; \ - uint16x4_t __s2_59 = __p2_59; \ - uint16x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ - uint16x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ - uint16x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \ - __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_u16(__rev2_59, __p3_59); \ +#define vmlaq_lane_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + float32x4_t __ret_59; \ + float32x4_t __s0_59 = __p0_59; \ + float32x4_t __s1_59 = __p1_59; \ + float32x2_t __s2_59 = __p2_59; \ + float32x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ + float32x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ + float32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 
0); \ + __ret_59 = __rev0_59 + __rev1_59 * __noswap_splatq_lane_f32(__rev2_59, __p3_59); \ __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ __ret_59; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_f32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ - float32x2_t __ret_60; \ - float32x2_t __s0_60 = __p0_60; \ - float32x2_t __s1_60 = __p1_60; \ - float32x2_t __s2_60 = __p2_60; \ - __ret_60 = __s0_60 + __s1_60 * splat_lane_f32(__s2_60, __p3_60); \ +#define vmlaq_lane_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + int32x4_t __ret_60; \ + int32x4_t __s0_60 = __p0_60; \ + int32x4_t __s1_60 = __p1_60; \ + int32x2_t __s2_60 = __p2_60; \ + __ret_60 = __s0_60 + __s1_60 * splatq_lane_s32(__s2_60, __p3_60); \ __ret_60; \ }) #else -#define vmla_lane_f32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ - float32x2_t __ret_61; \ - float32x2_t __s0_61 = __p0_61; \ - float32x2_t __s1_61 = __p1_61; \ - float32x2_t __s2_61 = __p2_61; \ - float32x2_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 1, 0); \ - float32x2_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 1, 0); \ - float32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ - __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_f32(__rev2_61, __p3_61); \ - __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 1, 0); \ +#define vmlaq_lane_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + int32x4_t __ret_61; \ + int32x4_t __s0_61 = __p0_61; \ + int32x4_t __s1_61 = __p1_61; \ + int32x2_t __s2_61 = __p2_61; \ + int32x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ + int32x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \ + int32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ + __ret_61 = __rev0_61 + __rev1_61 * __noswap_splatq_lane_s32(__rev2_61, __p3_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ __ret_61; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ - int32x2_t __ret_62; \ - int32x2_t __s0_62 = __p0_62; \ - int32x2_t __s1_62 = __p1_62; \ - int32x2_t __s2_62 = __p2_62; \ - __ret_62 = __s0_62 + __s1_62 * splat_lane_s32(__s2_62, __p3_62); \ +#define vmlaq_lane_s16(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + int16x8_t __ret_62; \ + int16x8_t __s0_62 = __p0_62; \ + int16x8_t __s1_62 = __p1_62; \ + int16x4_t __s2_62 = __p2_62; \ + __ret_62 = __s0_62 + __s1_62 * splatq_lane_s16(__s2_62, __p3_62); \ __ret_62; \ }) #else -#define vmla_lane_s32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ - int32x2_t __ret_63; \ - int32x2_t __s0_63 = __p0_63; \ - int32x2_t __s1_63 = __p1_63; \ - int32x2_t __s2_63 = __p2_63; \ - int32x2_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \ - int32x2_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 1, 0); \ - int32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ - __ret_63 = __rev0_63 + __rev1_63 * __noswap_splat_lane_s32(__rev2_63, __p3_63); \ - __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \ +#define vmlaq_lane_s16(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + int16x8_t __ret_63; \ + int16x8_t __s0_63 = __p0_63; \ + int16x8_t __s1_63 = __p1_63; \ + int16x4_t __s2_63 = __p2_63; \ + int16x8_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 7, 6, 5, 4, 3, 2, 1, 0); 
\ + int16x8_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 3, 2, 1, 0); \ + __ret_63 = __rev0_63 + __rev1_63 * __noswap_splatq_lane_s16(__rev2_63, __p3_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_63; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ - int16x4_t __ret_64; \ - int16x4_t __s0_64 = __p0_64; \ - int16x4_t __s1_64 = __p1_64; \ - int16x4_t __s2_64 = __p2_64; \ - __ret_64 = __s0_64 + __s1_64 * splat_lane_s16(__s2_64, __p3_64); \ +#define vmla_lane_u32(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + uint32x2_t __ret_64; \ + uint32x2_t __s0_64 = __p0_64; \ + uint32x2_t __s1_64 = __p1_64; \ + uint32x2_t __s2_64 = __p2_64; \ + __ret_64 = __s0_64 + __s1_64 * splat_lane_u32(__s2_64, __p3_64); \ __ret_64; \ }) #else -#define vmla_lane_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ - int16x4_t __ret_65; \ - int16x4_t __s0_65 = __p0_65; \ - int16x4_t __s1_65 = __p1_65; \ - int16x4_t __s2_65 = __p2_65; \ - int16x4_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 3, 2, 1, 0); \ - int16x4_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 3, 2, 1, 0); \ - int16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ - __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_s16(__rev2_65, __p3_65); \ - __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 3, 2, 1, 0); \ +#define vmla_lane_u32(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + uint32x2_t __ret_65; \ + uint32x2_t __s0_65 = __p0_65; \ + uint32x2_t __s1_65 = __p1_65; \ + uint32x2_t __s2_65 = __p2_65; \ + uint32x2_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 1, 0); \ + uint32x2_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 1, 0); \ + uint32x2_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 1, 0); \ + __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_u32(__rev2_65, __p3_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 1, 0); \ __ret_65; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { +#define vmla_lane_u16(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ + uint16x4_t __ret_66; \ + uint16x4_t __s0_66 = __p0_66; \ + uint16x4_t __s1_66 = __p1_66; \ + uint16x4_t __s2_66 = __p2_66; \ + __ret_66 = __s0_66 + __s1_66 * splat_lane_u16(__s2_66, __p3_66); \ + __ret_66; \ +}) +#else +#define vmla_lane_u16(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + uint16x4_t __ret_67; \ + uint16x4_t __s0_67 = __p0_67; \ + uint16x4_t __s1_67 = __p1_67; \ + uint16x4_t __s2_67 = __p2_67; \ + uint16x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ + uint16x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ + uint16x4_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 3, 2, 1, 0); \ + __ret_67 = __rev0_67 + __rev1_67 * __noswap_splat_lane_u16(__rev2_67, __p3_67); \ + __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ + __ret_67; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + float32x2_t __ret_68; \ + float32x2_t __s0_68 = __p0_68; \ + float32x2_t __s1_68 = __p1_68; \ + float32x2_t __s2_68 = __p2_68; 
\ + __ret_68 = __s0_68 + __s1_68 * splat_lane_f32(__s2_68, __p3_68); \ + __ret_68; \ +}) +#else +#define vmla_lane_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + float32x2_t __ret_69; \ + float32x2_t __s0_69 = __p0_69; \ + float32x2_t __s1_69 = __p1_69; \ + float32x2_t __s2_69 = __p2_69; \ + float32x2_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 1, 0); \ + float32x2_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 1, 0); \ + float32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ + __ret_69 = __rev0_69 + __rev1_69 * __noswap_splat_lane_f32(__rev2_69, __p3_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 1, 0); \ + __ret_69; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + int32x2_t __ret_70; \ + int32x2_t __s0_70 = __p0_70; \ + int32x2_t __s1_70 = __p1_70; \ + int32x2_t __s2_70 = __p2_70; \ + __ret_70 = __s0_70 + __s1_70 * splat_lane_s32(__s2_70, __p3_70); \ + __ret_70; \ +}) +#else +#define vmla_lane_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + int32x2_t __ret_71; \ + int32x2_t __s0_71 = __p0_71; \ + int32x2_t __s1_71 = __p1_71; \ + int32x2_t __s2_71 = __p2_71; \ + int32x2_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 1, 0); \ + int32x2_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 1, 0); \ + int32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ + __ret_71 = __rev0_71 + __rev1_71 * __noswap_splat_lane_s32(__rev2_71, __p3_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 1, 0); \ + __ret_71; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + int16x4_t __ret_72; \ + int16x4_t __s0_72 = __p0_72; \ + int16x4_t __s1_72 = __p1_72; \ + int16x4_t __s2_72 = __p2_72; \ + __ret_72 = __s0_72 + __s1_72 * splat_lane_s16(__s2_72, __p3_72); \ + __ret_72; \ +}) +#else +#define vmla_lane_s16(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ + int16x4_t __ret_73; \ + int16x4_t __s0_73 = __p0_73; \ + int16x4_t __s1_73 = __p1_73; \ + int16x4_t __s2_73 = __p2_73; \ + int16x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \ + int16x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ + int16x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \ + __ret_73 = __rev0_73 + __rev1_73 * __noswap_splat_lane_s16(__rev2_73, __p3_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ + __ret_73; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14628,13 +17889,13 @@ __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) 
uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14645,13 +17906,13 @@ __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14662,13 +17923,13 @@ __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14679,13 +17940,13 @@ __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14696,13 +17957,13 @@ __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; return __ret; } #else -__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t 
vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14713,13 +17974,13 @@ __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14730,13 +17991,13 @@ __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; return __ret; } #else -__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14747,13 +18008,13 @@ __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; return __ret; } #else -__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14764,13 +18025,13 @@ __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14781,13 +18042,13 @@ __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai 
__attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14799,13 +18060,13 @@ __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14817,13 +18078,13 @@ __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14835,13 +18096,13 @@ __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14853,13 +18114,13 @@ __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, 
float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14871,13 +18132,13 @@ __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14889,13 +18150,13 @@ __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14907,13 +18168,13 @@ __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14925,13 +18186,13 @@ __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14943,13 +18204,13 @@ __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; 
__ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -14961,13 +18222,13 @@ __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -14979,13 +18240,13 @@ __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -14997,13 +18258,13 @@ __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -15015,13 +18276,13 @@ __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -15033,253 +18294,253 @@ __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u32(__p0_66, __p1_66, __p2_66, __p3_66) 
__extension__ ({ \ - uint32x4_t __ret_66; \ - uint32x4_t __s0_66 = __p0_66; \ - uint32x4_t __s1_66 = __p1_66; \ - uint32x2_t __s2_66 = __p2_66; \ - __ret_66 = __s0_66 - __s1_66 * splatq_lane_u32(__s2_66, __p3_66); \ - __ret_66; \ -}) -#else -#define vmlsq_lane_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ - uint32x4_t __ret_67; \ - uint32x4_t __s0_67 = __p0_67; \ - uint32x4_t __s1_67 = __p1_67; \ - uint32x2_t __s2_67 = __p2_67; \ - uint32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ - uint32x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ - uint32x2_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ - __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_u32(__rev2_67, __p3_67); \ - __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ - __ret_67; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ - uint16x8_t __ret_68; \ - uint16x8_t __s0_68 = __p0_68; \ - uint16x8_t __s1_68 = __p1_68; \ - uint16x4_t __s2_68 = __p2_68; \ - __ret_68 = __s0_68 - __s1_68 * splatq_lane_u16(__s2_68, __p3_68); \ - __ret_68; \ -}) -#else -#define vmlsq_lane_u16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ - uint16x8_t __ret_69; \ - uint16x8_t __s0_69 = __p0_69; \ - uint16x8_t __s1_69 = __p1_69; \ - uint16x4_t __s2_69 = __p2_69; \ - uint16x8_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \ - __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_u16(__rev2_69, __p3_69); \ - __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_69; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ - float32x4_t __ret_70; \ - float32x4_t __s0_70 = __p0_70; \ - float32x4_t __s1_70 = __p1_70; \ - float32x2_t __s2_70 = __p2_70; \ - __ret_70 = __s0_70 - __s1_70 * splatq_lane_f32(__s2_70, __p3_70); \ - __ret_70; \ -}) -#else -#define vmlsq_lane_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ - float32x4_t __ret_71; \ - float32x4_t __s0_71 = __p0_71; \ - float32x4_t __s1_71 = __p1_71; \ - float32x2_t __s2_71 = __p2_71; \ - float32x4_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \ - float32x4_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 3, 2, 1, 0); \ - float32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ - __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_f32(__rev2_71, __p3_71); \ - __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \ - __ret_71; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ - int32x4_t __ret_72; \ - int32x4_t __s0_72 = __p0_72; \ - int32x4_t __s1_72 = __p1_72; \ - int32x2_t __s2_72 = __p2_72; \ - __ret_72 = __s0_72 - __s1_72 * splatq_lane_s32(__s2_72, __p3_72); \ - __ret_72; \ -}) -#else -#define vmlsq_lane_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ - int32x4_t __ret_73; \ - int32x4_t __s0_73 = __p0_73; \ - int32x4_t __s1_73 = __p1_73; \ - int32x2_t __s2_73 = __p2_73; \ - int32x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); 
\ - int32x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ - int32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ - __ret_73 = __rev0_73 - __rev1_73 * __noswap_splatq_lane_s32(__rev2_73, __p3_73); \ - __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ - __ret_73; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ - int16x8_t __ret_74; \ - int16x8_t __s0_74 = __p0_74; \ - int16x8_t __s1_74 = __p1_74; \ - int16x4_t __s2_74 = __p2_74; \ - __ret_74 = __s0_74 - __s1_74 * splatq_lane_s16(__s2_74, __p3_74); \ +#define vmlsq_lane_u32(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + uint32x4_t __ret_74; \ + uint32x4_t __s0_74 = __p0_74; \ + uint32x4_t __s1_74 = __p1_74; \ + uint32x2_t __s2_74 = __p2_74; \ + __ret_74 = __s0_74 - __s1_74 * splatq_lane_u32(__s2_74, __p3_74); \ __ret_74; \ }) #else -#define vmlsq_lane_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ - int16x8_t __ret_75; \ - int16x8_t __s0_75 = __p0_75; \ - int16x8_t __s1_75 = __p1_75; \ - int16x4_t __s2_75 = __p2_75; \ - int16x8_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ - __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_s16(__rev2_75, __p3_75); \ - __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vmlsq_lane_u32(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + uint32x4_t __ret_75; \ + uint32x4_t __s0_75 = __p0_75; \ + uint32x4_t __s1_75 = __p1_75; \ + uint32x2_t __s2_75 = __p2_75; \ + uint32x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ + uint32x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \ + uint32x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \ + __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_u32(__rev2_75, __p3_75); \ + __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ __ret_75; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ - uint32x2_t __ret_76; \ - uint32x2_t __s0_76 = __p0_76; \ - uint32x2_t __s1_76 = __p1_76; \ - uint32x2_t __s2_76 = __p2_76; \ - __ret_76 = __s0_76 - __s1_76 * splat_lane_u32(__s2_76, __p3_76); \ +#define vmlsq_lane_u16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ + uint16x8_t __ret_76; \ + uint16x8_t __s0_76 = __p0_76; \ + uint16x8_t __s1_76 = __p1_76; \ + uint16x4_t __s2_76 = __p2_76; \ + __ret_76 = __s0_76 - __s1_76 * splatq_lane_u16(__s2_76, __p3_76); \ __ret_76; \ }) #else -#define vmls_lane_u32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ - uint32x2_t __ret_77; \ - uint32x2_t __s0_77 = __p0_77; \ - uint32x2_t __s1_77 = __p1_77; \ - uint32x2_t __s2_77 = __p2_77; \ - uint32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ - uint32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ - uint32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ - __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_u32(__rev2_77, __p3_77); \ - __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \ +#define vmlsq_lane_u16(__p0_77, __p1_77, __p2_77, 
__p3_77) __extension__ ({ \ + uint16x8_t __ret_77; \ + uint16x8_t __s0_77 = __p0_77; \ + uint16x8_t __s1_77 = __p1_77; \ + uint16x4_t __s2_77 = __p2_77; \ + uint16x8_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 3, 2, 1, 0); \ + __ret_77 = __rev0_77 - __rev1_77 * __noswap_splatq_lane_u16(__rev2_77, __p3_77); \ + __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_77; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ - uint16x4_t __ret_78; \ - uint16x4_t __s0_78 = __p0_78; \ - uint16x4_t __s1_78 = __p1_78; \ - uint16x4_t __s2_78 = __p2_78; \ - __ret_78 = __s0_78 - __s1_78 * splat_lane_u16(__s2_78, __p3_78); \ +#define vmlsq_lane_f32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + float32x4_t __ret_78; \ + float32x4_t __s0_78 = __p0_78; \ + float32x4_t __s1_78 = __p1_78; \ + float32x2_t __s2_78 = __p2_78; \ + __ret_78 = __s0_78 - __s1_78 * splatq_lane_f32(__s2_78, __p3_78); \ __ret_78; \ }) #else -#define vmls_lane_u16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ - uint16x4_t __ret_79; \ - uint16x4_t __s0_79 = __p0_79; \ - uint16x4_t __s1_79 = __p1_79; \ - uint16x4_t __s2_79 = __p2_79; \ - uint16x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \ - uint16x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ - uint16x4_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 3, 2, 1, 0); \ - __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_u16(__rev2_79, __p3_79); \ +#define vmlsq_lane_f32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + float32x4_t __ret_79; \ + float32x4_t __s0_79 = __p0_79; \ + float32x4_t __s1_79 = __p1_79; \ + float32x2_t __s2_79 = __p2_79; \ + float32x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \ + float32x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ + float32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ + __ret_79 = __rev0_79 - __rev1_79 * __noswap_splatq_lane_f32(__rev2_79, __p3_79); \ __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \ __ret_79; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ - float32x2_t __ret_80; \ - float32x2_t __s0_80 = __p0_80; \ - float32x2_t __s1_80 = __p1_80; \ - float32x2_t __s2_80 = __p2_80; \ - __ret_80 = __s0_80 - __s1_80 * splat_lane_f32(__s2_80, __p3_80); \ +#define vmlsq_lane_s32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + int32x4_t __ret_80; \ + int32x4_t __s0_80 = __p0_80; \ + int32x4_t __s1_80 = __p1_80; \ + int32x2_t __s2_80 = __p2_80; \ + __ret_80 = __s0_80 - __s1_80 * splatq_lane_s32(__s2_80, __p3_80); \ __ret_80; \ }) #else -#define vmls_lane_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ - float32x2_t __ret_81; \ - float32x2_t __s0_81 = __p0_81; \ - float32x2_t __s1_81 = __p1_81; \ - float32x2_t __s2_81 = __p2_81; \ - float32x2_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \ - float32x2_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 1, 0); \ - float32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ - 
__ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_f32(__rev2_81, __p3_81); \ - __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \ +#define vmlsq_lane_s32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ + int32x4_t __ret_81; \ + int32x4_t __s0_81 = __p0_81; \ + int32x4_t __s1_81 = __p1_81; \ + int32x2_t __s2_81 = __p2_81; \ + int32x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \ + int32x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \ + int32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ + __ret_81 = __rev0_81 - __rev1_81 * __noswap_splatq_lane_s32(__rev2_81, __p3_81); \ + __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \ __ret_81; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ - int32x2_t __ret_82; \ - int32x2_t __s0_82 = __p0_82; \ - int32x2_t __s1_82 = __p1_82; \ - int32x2_t __s2_82 = __p2_82; \ - __ret_82 = __s0_82 - __s1_82 * splat_lane_s32(__s2_82, __p3_82); \ +#define vmlsq_lane_s16(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ + int16x8_t __ret_82; \ + int16x8_t __s0_82 = __p0_82; \ + int16x8_t __s1_82 = __p1_82; \ + int16x4_t __s2_82 = __p2_82; \ + __ret_82 = __s0_82 - __s1_82 * splatq_lane_s16(__s2_82, __p3_82); \ __ret_82; \ }) #else -#define vmls_lane_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ - int32x2_t __ret_83; \ - int32x2_t __s0_83 = __p0_83; \ - int32x2_t __s1_83 = __p1_83; \ - int32x2_t __s2_83 = __p2_83; \ - int32x2_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \ - int32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ - int32x2_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 1, 0); \ - __ret_83 = __rev0_83 - __rev1_83 * __noswap_splat_lane_s32(__rev2_83, __p3_83); \ - __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \ +#define vmlsq_lane_s16(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ + int16x8_t __ret_83; \ + int16x8_t __s0_83 = __p0_83; \ + int16x8_t __s1_83 = __p1_83; \ + int16x4_t __s2_83 = __p2_83; \ + int16x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \ + __ret_83 = __rev0_83 - __rev1_83 * __noswap_splatq_lane_s16(__rev2_83, __p3_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_83; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ - int16x4_t __ret_84; \ - int16x4_t __s0_84 = __p0_84; \ - int16x4_t __s1_84 = __p1_84; \ - int16x4_t __s2_84 = __p2_84; \ - __ret_84 = __s0_84 - __s1_84 * splat_lane_s16(__s2_84, __p3_84); \ +#define vmls_lane_u32(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ + uint32x2_t __ret_84; \ + uint32x2_t __s0_84 = __p0_84; \ + uint32x2_t __s1_84 = __p1_84; \ + uint32x2_t __s2_84 = __p2_84; \ + __ret_84 = __s0_84 - __s1_84 * splat_lane_u32(__s2_84, __p3_84); \ __ret_84; \ }) #else -#define vmls_lane_s16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ - int16x4_t __ret_85; \ - int16x4_t __s0_85 = __p0_85; \ - int16x4_t __s1_85 = __p1_85; \ - int16x4_t __s2_85 = __p2_85; \ - int16x4_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, 
__s0_85, 3, 2, 1, 0); \ - int16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ - int16x4_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \ - __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_s16(__rev2_85, __p3_85); \ - __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \ +#define vmls_lane_u32(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ + uint32x2_t __ret_85; \ + uint32x2_t __s0_85 = __p0_85; \ + uint32x2_t __s1_85 = __p1_85; \ + uint32x2_t __s2_85 = __p2_85; \ + uint32x2_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 1, 0); \ + uint32x2_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 1, 0); \ + uint32x2_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \ + __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_u32(__rev2_85, __p3_85); \ + __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 1, 0); \ __ret_85; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { +#define vmls_lane_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \ + uint16x4_t __ret_86; \ + uint16x4_t __s0_86 = __p0_86; \ + uint16x4_t __s1_86 = __p1_86; \ + uint16x4_t __s2_86 = __p2_86; \ + __ret_86 = __s0_86 - __s1_86 * splat_lane_u16(__s2_86, __p3_86); \ + __ret_86; \ +}) +#else +#define vmls_lane_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \ + uint16x4_t __ret_87; \ + uint16x4_t __s0_87 = __p0_87; \ + uint16x4_t __s1_87 = __p1_87; \ + uint16x4_t __s2_87 = __p2_87; \ + uint16x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + uint16x4_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 3, 2, 1, 0); \ + uint16x4_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 3, 2, 1, 0); \ + __ret_87 = __rev0_87 - __rev1_87 * __noswap_splat_lane_u16(__rev2_87, __p3_87); \ + __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ + __ret_87; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_f32(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \ + float32x2_t __ret_88; \ + float32x2_t __s0_88 = __p0_88; \ + float32x2_t __s1_88 = __p1_88; \ + float32x2_t __s2_88 = __p2_88; \ + __ret_88 = __s0_88 - __s1_88 * splat_lane_f32(__s2_88, __p3_88); \ + __ret_88; \ +}) +#else +#define vmls_lane_f32(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \ + float32x2_t __ret_89; \ + float32x2_t __s0_89 = __p0_89; \ + float32x2_t __s1_89 = __p1_89; \ + float32x2_t __s2_89 = __p2_89; \ + float32x2_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 1, 0); \ + float32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \ + float32x2_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 1, 0); \ + __ret_89 = __rev0_89 - __rev1_89 * __noswap_splat_lane_f32(__rev2_89, __p3_89); \ + __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 1, 0); \ + __ret_89; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \ + int32x2_t __ret_90; \ + int32x2_t __s0_90 = __p0_90; \ + int32x2_t __s1_90 = __p1_90; \ + int32x2_t __s2_90 = __p2_90; \ + __ret_90 = __s0_90 - __s1_90 * splat_lane_s32(__s2_90, __p3_90); \ + __ret_90; \ +}) +#else +#define vmls_lane_s32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \ + int32x2_t __ret_91; \ + int32x2_t __s0_91 = __p0_91; \ + int32x2_t __s1_91 = __p1_91; \ + int32x2_t 
__s2_91 = __p2_91; \ + int32x2_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \ + int32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ + int32x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \ + __ret_91 = __rev0_91 - __rev1_91 * __noswap_splat_lane_s32(__rev2_91, __p3_91); \ + __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \ + __ret_91; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \ + int16x4_t __ret_92; \ + int16x4_t __s0_92 = __p0_92; \ + int16x4_t __s1_92 = __p1_92; \ + int16x4_t __s2_92 = __p2_92; \ + __ret_92 = __s0_92 - __s1_92 * splat_lane_s16(__s2_92, __p3_92); \ + __ret_92; \ +}) +#else +#define vmls_lane_s16(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \ + int16x4_t __ret_93; \ + int16x4_t __s0_93 = __p0_93; \ + int16x4_t __s1_93 = __p1_93; \ + int16x4_t __s2_93 = __p2_93; \ + int16x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ + int16x4_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 3, 2, 1, 0); \ + int16x4_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \ + __ret_93 = __rev0_93 - __rev1_93 * __noswap_splat_lane_s16(__rev2_93, __p3_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ + __ret_93; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -15290,13 +18551,13 @@ __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15307,13 +18568,13 @@ __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -15324,13 +18585,13 @@ __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -15341,13 +18602,13 @@ __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15358,13 +18619,13 @@ __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; return __ret; } #else -__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -15375,13 +18636,13 @@ __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -15392,13 +18653,13 @@ __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) 
float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; return __ret; } #else -__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -15409,13 +18670,13 @@ __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; return __ret; } #else -__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -15426,13 +18687,13 @@ __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -15443,13 +18704,13 @@ __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmov_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly8x8_t vmov_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15458,13 +18719,13 @@ __ai poly8x8_t vmov_n_p8(poly8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vmov_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly16x4_t vmov_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -15473,13 +18734,13 @@ __ai poly16x4_t vmov_n_p16(poly16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, 
__p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15488,13 +18749,13 @@ __ai poly8x16_t vmovq_n_p8(poly8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15503,13 +18764,13 @@ __ai poly16x8_t vmovq_n_p16(poly16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15518,13 +18779,13 @@ __ai uint8x16_t vmovq_n_u8(uint8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -15533,13 +18794,13 @@ __ai uint32x4_t vmovq_n_u32(uint32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; return __ret; } #else -__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -15548,13 +18809,13 @@ __ai uint64x2_t vmovq_n_u64(uint64_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15563,13 +18824,13 @@ __ai uint16x8_t vmovq_n_u16(uint16_t __p0) 
{ #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmovq_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int8x16_t vmovq_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) { int8x16_t __ret; __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15578,13 +18839,13 @@ __ai int8x16_t vmovq_n_s8(int8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmovq_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai float32x4_t vmovq_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) { float32x4_t __ret; __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -15610,13 +18871,13 @@ __ai float32x4_t vmovq_n_f32(float32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovq_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int32x4_t vmovq_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) { int32x4_t __ret; __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -15625,13 +18886,13 @@ __ai int32x4_t vmovq_n_s32(int32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovq_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; return __ret; } #else -__ai int64x2_t vmovq_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) { int64x2_t __ret; __ret = (int64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -15640,13 +18901,13 @@ __ai int64x2_t vmovq_n_s64(int64_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovq_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x8_t vmovq_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15655,13 +18916,13 @@ __ai int16x8_t vmovq_n_s16(int16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmov_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint8x8_t vmov_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15670,13 +18931,13 @@ __ai uint8x8_t 
vmov_n_u8(uint8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmov_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; return __ret; } #else -__ai uint32x2_t vmov_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -15684,19 +18945,19 @@ __ai uint32x2_t vmov_n_u32(uint32_t __p0) { } #endif -__ai uint64x1_t vmov_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vmov_n_u64(uint64_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmov_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint16x4_t vmov_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -15705,13 +18966,13 @@ __ai uint16x4_t vmov_n_u16(uint16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmov_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int8x8_t vmov_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); @@ -15720,13 +18981,13 @@ __ai int8x8_t vmov_n_s8(int8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmov_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; return __ret; } #else -__ai float32x2_t vmov_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) { float32x2_t __ret; __ret = (float32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -15752,13 +19013,13 @@ __ai float32x2_t vmov_n_f32(float32_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmov_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; return __ret; } #else -__ai int32x2_t vmov_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) { int32x2_t __ret; __ret = (int32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -15766,19 +19027,19 @@ __ai int32x2_t vmov_n_s32(int32_t __p0) { } #endif -__ai int64x1_t vmov_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vmov_n_s64(int64_t __p0) { int64x1_t __ret; __ret = (int64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmov_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x4_t vmov_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) { int16x4_t __ret; __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); @@ -15787,20 +19048,20 @@ __ai int16x4_t vmov_n_s16(int16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); return __ret; @@ -15808,20 +19069,20 @@ __ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); return __ret; @@ -15829,20 +19090,20 @@ __ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); return __ret; @@ -15850,20 +19111,20 @@ __ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); return __ret; } #else -__ai int16x8_t vmovl_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } -__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); return __ret; @@ -15871,20 +19132,20 @@ __ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); return __ret; } #else -__ai int64x2_t vmovl_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); return __ret; @@ -15892,20 +19153,20 @@ __ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); return __ret; } #else -__ai int32x4_t vmovl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); return __ret; @@ -15913,20 +19174,20 @@ __ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); return __ret; @@ -15934,20 +19195,20 @@ __ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
1, 0); __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); return __ret; @@ -15955,20 +19216,20 @@ __ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); return __ret; @@ -15976,20 +19237,20 @@ __ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmovn_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); return __ret; } #else -__ai int16x4_t vmovn_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); return __ret; @@ -15997,20 +19258,20 @@ __ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); return __ret; } #else -__ai int32x2_t vmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); return __ret; @@ -16018,20 +19279,20 @@ __ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmovn_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); return __ret; } #else -__ai int8x8_t vmovn_s16(int16x8_t __p0) { +__ai 
__attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); return __ret; @@ -16039,13 +19300,13 @@ __ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16056,13 +19317,13 @@ __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16073,13 +19334,13 @@ __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16090,13 +19351,13 @@ __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16107,13 +19368,13 @@ __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulq_f32(float32x4_t __p0, 
float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16124,13 +19385,13 @@ __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16141,13 +19402,13 @@ __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16158,13 +19419,13 @@ __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16175,13 +19436,13 @@ __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -16192,13 +19453,13 @@ __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t 
__p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16209,13 +19470,13 @@ __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16226,13 +19487,13 @@ __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -16243,13 +19504,13 @@ __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -16260,13 +19521,13 @@ __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16277,13 +19538,13 @@ __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16294,13 +19555,13 @@ __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16311,223 +19572,223 @@ __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ - uint32x4_t __ret_86; \ - uint32x4_t __s0_86 = __p0_86; \ - uint32x2_t __s1_86 = __p1_86; \ - __ret_86 = __s0_86 * splatq_lane_u32(__s1_86, __p2_86); \ - __ret_86; \ -}) -#else -#define vmulq_lane_u32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ - uint32x4_t __ret_87; \ - uint32x4_t __s0_87 = __p0_87; \ - uint32x2_t __s1_87 = __p1_87; \ - uint32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ - uint32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ - __ret_87 = __rev0_87 * __noswap_splatq_lane_u32(__rev1_87, __p2_87); \ - __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ - __ret_87; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u16(__p0_88, __p1_88, __p2_88) __extension__ ({ \ - uint16x8_t __ret_88; \ - uint16x8_t __s0_88 = __p0_88; \ - uint16x4_t __s1_88 = __p1_88; \ - __ret_88 = __s0_88 * splatq_lane_u16(__s1_88, __p2_88); \ - __ret_88; \ -}) -#else -#define vmulq_lane_u16(__p0_89, __p1_89, __p2_89) __extension__ ({ \ - uint16x8_t __ret_89; \ - uint16x8_t __s0_89 = __p0_89; \ - uint16x4_t __s1_89 = __p1_89; \ - uint16x8_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 3, 2, 1, 0); \ - __ret_89 = __rev0_89 * __noswap_splatq_lane_u16(__rev1_89, __p2_89); \ - __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_89; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f32(__p0_90, __p1_90, __p2_90) __extension__ ({ \ - float32x4_t __ret_90; \ - float32x4_t __s0_90 = __p0_90; \ - float32x2_t __s1_90 = __p1_90; \ - __ret_90 = __s0_90 * splatq_lane_f32(__s1_90, __p2_90); \ - __ret_90; \ -}) -#else -#define vmulq_lane_f32(__p0_91, __p1_91, __p2_91) __extension__ ({ \ - float32x4_t __ret_91; \ - float32x4_t __s0_91 = __p0_91; \ - float32x2_t __s1_91 = __p1_91; \ - float32x4_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 3, 2, 1, 0); \ - float32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ - __ret_91 = __rev0_91 * __noswap_splatq_lane_f32(__rev1_91, __p2_91); \ - __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 3, 2, 1, 0); \ - __ret_91; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ - int32x4_t __ret_92; \ - int32x4_t __s0_92 = __p0_92; \ - int32x2_t __s1_92 = __p1_92; 
\ - __ret_92 = __s0_92 * splatq_lane_s32(__s1_92, __p2_92); \ - __ret_92; \ -}) -#else -#define vmulq_lane_s32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ - int32x4_t __ret_93; \ - int32x4_t __s0_93 = __p0_93; \ - int32x2_t __s1_93 = __p1_93; \ - int32x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ - int32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ - __ret_93 = __rev0_93 * __noswap_splatq_lane_s32(__rev1_93, __p2_93); \ - __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ - __ret_93; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ - int16x8_t __ret_94; \ - int16x8_t __s0_94 = __p0_94; \ - int16x4_t __s1_94 = __p1_94; \ - __ret_94 = __s0_94 * splatq_lane_s16(__s1_94, __p2_94); \ +#define vmulq_lane_u32(__p0_94, __p1_94, __p2_94) __extension__ ({ \ + uint32x4_t __ret_94; \ + uint32x4_t __s0_94 = __p0_94; \ + uint32x2_t __s1_94 = __p1_94; \ + __ret_94 = __s0_94 * splatq_lane_u32(__s1_94, __p2_94); \ __ret_94; \ }) #else -#define vmulq_lane_s16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ - int16x8_t __ret_95; \ - int16x8_t __s0_95 = __p0_95; \ - int16x4_t __s1_95 = __p1_95; \ - int16x8_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ - __ret_95 = __rev0_95 * __noswap_splatq_lane_s16(__rev1_95, __p2_95); \ - __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vmulq_lane_u32(__p0_95, __p1_95, __p2_95) __extension__ ({ \ + uint32x4_t __ret_95; \ + uint32x4_t __s0_95 = __p0_95; \ + uint32x2_t __s1_95 = __p1_95; \ + uint32x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ + uint32x2_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 1, 0); \ + __ret_95 = __rev0_95 * __noswap_splatq_lane_u32(__rev1_95, __p2_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ __ret_95; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ - uint32x2_t __ret_96; \ - uint32x2_t __s0_96 = __p0_96; \ - uint32x2_t __s1_96 = __p1_96; \ - __ret_96 = __s0_96 * splat_lane_u32(__s1_96, __p2_96); \ +#define vmulq_lane_u16(__p0_96, __p1_96, __p2_96) __extension__ ({ \ + uint16x8_t __ret_96; \ + uint16x8_t __s0_96 = __p0_96; \ + uint16x4_t __s1_96 = __p1_96; \ + __ret_96 = __s0_96 * splatq_lane_u16(__s1_96, __p2_96); \ __ret_96; \ }) #else -#define vmul_lane_u32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ - uint32x2_t __ret_97; \ - uint32x2_t __s0_97 = __p0_97; \ - uint32x2_t __s1_97 = __p1_97; \ - uint32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ - uint32x2_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ - __ret_97 = __rev0_97 * __noswap_splat_lane_u32(__rev1_97, __p2_97); \ - __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ +#define vmulq_lane_u16(__p0_97, __p1_97, __p2_97) __extension__ ({ \ + uint16x8_t __ret_97; \ + uint16x8_t __s0_97 = __p0_97; \ + uint16x4_t __s1_97 = __p1_97; \ + uint16x8_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 3, 2, 1, 0); \ + __ret_97 = __rev0_97 * __noswap_splatq_lane_u16(__rev1_97, __p2_97); \ + __ret_97 = 
__builtin_shufflevector(__ret_97, __ret_97, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_97; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u16(__p0_98, __p1_98, __p2_98) __extension__ ({ \ - uint16x4_t __ret_98; \ - uint16x4_t __s0_98 = __p0_98; \ - uint16x4_t __s1_98 = __p1_98; \ - __ret_98 = __s0_98 * splat_lane_u16(__s1_98, __p2_98); \ +#define vmulq_lane_f32(__p0_98, __p1_98, __p2_98) __extension__ ({ \ + float32x4_t __ret_98; \ + float32x4_t __s0_98 = __p0_98; \ + float32x2_t __s1_98 = __p1_98; \ + __ret_98 = __s0_98 * splatq_lane_f32(__s1_98, __p2_98); \ __ret_98; \ }) #else -#define vmul_lane_u16(__p0_99, __p1_99, __p2_99) __extension__ ({ \ - uint16x4_t __ret_99; \ - uint16x4_t __s0_99 = __p0_99; \ - uint16x4_t __s1_99 = __p1_99; \ - uint16x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ - uint16x4_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 3, 2, 1, 0); \ - __ret_99 = __rev0_99 * __noswap_splat_lane_u16(__rev1_99, __p2_99); \ +#define vmulq_lane_f32(__p0_99, __p1_99, __p2_99) __extension__ ({ \ + float32x4_t __ret_99; \ + float32x4_t __s0_99 = __p0_99; \ + float32x2_t __s1_99 = __p1_99; \ + float32x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ + float32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ + __ret_99 = __rev0_99 * __noswap_splatq_lane_f32(__rev1_99, __p2_99); \ __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \ __ret_99; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ - float32x2_t __ret_100; \ - float32x2_t __s0_100 = __p0_100; \ - float32x2_t __s1_100 = __p1_100; \ - __ret_100 = __s0_100 * splat_lane_f32(__s1_100, __p2_100); \ +#define vmulq_lane_s32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ + int32x4_t __ret_100; \ + int32x4_t __s0_100 = __p0_100; \ + int32x2_t __s1_100 = __p1_100; \ + __ret_100 = __s0_100 * splatq_lane_s32(__s1_100, __p2_100); \ __ret_100; \ }) #else -#define vmul_lane_f32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ - float32x2_t __ret_101; \ - float32x2_t __s0_101 = __p0_101; \ - float32x2_t __s1_101 = __p1_101; \ - float32x2_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 1, 0); \ - float32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \ - __ret_101 = __rev0_101 * __noswap_splat_lane_f32(__rev1_101, __p2_101); \ - __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 1, 0); \ +#define vmulq_lane_s32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ + int32x4_t __ret_101; \ + int32x4_t __s0_101 = __p0_101; \ + int32x2_t __s1_101 = __p1_101; \ + int32x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \ + int32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \ + __ret_101 = __rev0_101 * __noswap_splatq_lane_s32(__rev1_101, __p2_101); \ + __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \ __ret_101; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ - int32x2_t __ret_102; \ - int32x2_t __s0_102 = __p0_102; \ - int32x2_t __s1_102 = __p1_102; \ - __ret_102 = __s0_102 * splat_lane_s32(__s1_102, __p2_102); \ +#define vmulq_lane_s16(__p0_102, __p1_102, __p2_102) __extension__ ({ \ + int16x8_t __ret_102; \ + int16x8_t __s0_102 = __p0_102; \ + int16x4_t __s1_102 = __p1_102; \ + __ret_102 = __s0_102 * splatq_lane_s16(__s1_102, __p2_102); \ 
__ret_102; \ }) #else -#define vmul_lane_s32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ - int32x2_t __ret_103; \ - int32x2_t __s0_103 = __p0_103; \ - int32x2_t __s1_103 = __p1_103; \ - int32x2_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ - int32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ - __ret_103 = __rev0_103 * __noswap_splat_lane_s32(__rev1_103, __p2_103); \ - __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ +#define vmulq_lane_s16(__p0_103, __p1_103, __p2_103) __extension__ ({ \ + int16x8_t __ret_103; \ + int16x8_t __s0_103 = __p0_103; \ + int16x4_t __s1_103 = __p1_103; \ + int16x8_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 3, 2, 1, 0); \ + __ret_103 = __rev0_103 * __noswap_splatq_lane_s16(__rev1_103, __p2_103); \ + __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_103; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ - int16x4_t __ret_104; \ - int16x4_t __s0_104 = __p0_104; \ - int16x4_t __s1_104 = __p1_104; \ - __ret_104 = __s0_104 * splat_lane_s16(__s1_104, __p2_104); \ +#define vmul_lane_u32(__p0_104, __p1_104, __p2_104) __extension__ ({ \ + uint32x2_t __ret_104; \ + uint32x2_t __s0_104 = __p0_104; \ + uint32x2_t __s1_104 = __p1_104; \ + __ret_104 = __s0_104 * splat_lane_u32(__s1_104, __p2_104); \ __ret_104; \ }) #else -#define vmul_lane_s16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ - int16x4_t __ret_105; \ - int16x4_t __s0_105 = __p0_105; \ - int16x4_t __s1_105 = __p1_105; \ - int16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ - int16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ - __ret_105 = __rev0_105 * __noswap_splat_lane_s16(__rev1_105, __p2_105); \ - __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ +#define vmul_lane_u32(__p0_105, __p1_105, __p2_105) __extension__ ({ \ + uint32x2_t __ret_105; \ + uint32x2_t __s0_105 = __p0_105; \ + uint32x2_t __s1_105 = __p1_105; \ + uint32x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \ + uint32x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \ + __ret_105 = __rev0_105 * __noswap_splat_lane_u32(__rev1_105, __p2_105); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \ __ret_105; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { +#define vmul_lane_u16(__p0_106, __p1_106, __p2_106) __extension__ ({ \ + uint16x4_t __ret_106; \ + uint16x4_t __s0_106 = __p0_106; \ + uint16x4_t __s1_106 = __p1_106; \ + __ret_106 = __s0_106 * splat_lane_u16(__s1_106, __p2_106); \ + __ret_106; \ +}) +#else +#define vmul_lane_u16(__p0_107, __p1_107, __p2_107) __extension__ ({ \ + uint16x4_t __ret_107; \ + uint16x4_t __s0_107 = __p0_107; \ + uint16x4_t __s1_107 = __p1_107; \ + uint16x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \ + uint16x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \ + __ret_107 = __rev0_107 * __noswap_splat_lane_u16(__rev1_107, __p2_107); \ + __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \ + __ret_107; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vmul_lane_f32(__p0_108, __p1_108, __p2_108) __extension__ ({ \ + float32x2_t __ret_108; \ + float32x2_t __s0_108 = __p0_108; \ + float32x2_t __s1_108 = __p1_108; \ + __ret_108 = __s0_108 * splat_lane_f32(__s1_108, __p2_108); \ + __ret_108; \ +}) +#else +#define vmul_lane_f32(__p0_109, __p1_109, __p2_109) __extension__ ({ \ + float32x2_t __ret_109; \ + float32x2_t __s0_109 = __p0_109; \ + float32x2_t __s1_109 = __p1_109; \ + float32x2_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 1, 0); \ + float32x2_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 1, 0); \ + __ret_109 = __rev0_109 * __noswap_splat_lane_f32(__rev1_109, __p2_109); \ + __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 1, 0); \ + __ret_109; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ + int32x2_t __ret_110; \ + int32x2_t __s0_110 = __p0_110; \ + int32x2_t __s1_110 = __p1_110; \ + __ret_110 = __s0_110 * splat_lane_s32(__s1_110, __p2_110); \ + __ret_110; \ +}) +#else +#define vmul_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ + int32x2_t __ret_111; \ + int32x2_t __s0_111 = __p0_111; \ + int32x2_t __s1_111 = __p1_111; \ + int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + __ret_111 = __rev0_111 * __noswap_splat_lane_s32(__rev1_111, __p2_111); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ + int16x4_t __ret_112; \ + int16x4_t __s0_112 = __p0_112; \ + int16x4_t __s1_112 = __p1_112; \ + __ret_112 = __s0_112 * splat_lane_s16(__s1_112, __p2_112); \ + __ret_112; \ +}) +#else +#define vmul_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ + int16x4_t __ret_113; \ + int16x4_t __s0_113 = __p0_113; \ + int16x4_t __s1_113 = __p1_113; \ + int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ + int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ + __ret_113 = __rev0_113 * __noswap_splat_lane_s16(__rev1_113, __p2_113); \ + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ + __ret_113; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { uint32x4_t __ret; __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; @@ -16537,13 +19798,13 @@ __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { uint16x8_t __ret; __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; return __ret; } #else -__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; @@ -16553,13 +19814,13 @@ __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { float32x4_t __ret; __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; @@ -16569,13 +19830,13 @@ __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; @@ -16585,13 +19846,13 @@ __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; return __ret; } #else -__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; @@ -16601,13 +19862,13 @@ __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { uint32x2_t __ret; __ret = __p0 * (uint32x2_t) {__p1, __p1}; return __ret; } #else -__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (uint32x2_t) {__p1, __p1}; @@ -16617,13 +19878,13 @@ __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { uint16x4_t __ret; __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; @@ 
-16633,13 +19894,13 @@ __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { float32x2_t __ret; __ret = __p0 * (float32x2_t) {__p1, __p1}; return __ret; } #else -__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (float32x2_t) {__p1, __p1}; @@ -16649,13 +19910,13 @@ __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; __ret = __p0 * (int32x2_t) {__p1, __p1}; return __ret; } #else -__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (int32x2_t) {__p1, __p1}; @@ -16665,13 +19926,13 @@ __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; @@ -16681,13 +19942,13 @@ __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); return __ret; } #else -__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { poly16x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16695,7 +19956,7 @@ __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { poly16x8_t __ret; __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); return __ret; @@ -16703,13 +19964,13 @@ __ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); return __ret; } #else -__ai uint16x8_t 
vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16717,7 +19978,7 @@ __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); return __ret; @@ -16725,13 +19986,13 @@ __ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -16739,7 +20000,7 @@ __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); return __ret; @@ -16747,13 +20008,13 @@ __ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16761,7 +20022,7 @@ __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); return __ret; @@ -16769,13 +20030,13 @@ __ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); return __ret; } #else -__ai int16x8_t 
vmull_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -16783,7 +20044,7 @@ __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); return __ret; @@ -16791,13 +20052,13 @@ __ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #else -__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -16805,7 +20066,7 @@ __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; @@ -16813,13 +20074,13 @@ __ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #else -__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -16827,7 +20088,7 @@ __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; @@ -16835,104 +20096,104 @@ __ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ - uint64x2_t __ret_106; \ - uint32x2_t __s0_106 = __p0_106; \ - uint32x2_t __s1_106 = __p1_106; \ - __ret_106 = vmull_u32(__s0_106, splat_lane_u32(__s1_106, __p2_106)); \ - __ret_106; \ +#define vmull_lane_u32(__p0_114, __p1_114, __p2_114) __extension__ ({ \ + uint64x2_t __ret_114; \ + uint32x2_t 
__s0_114 = __p0_114; \ + uint32x2_t __s1_114 = __p1_114; \ + __ret_114 = vmull_u32(__s0_114, splat_lane_u32(__s1_114, __p2_114)); \ + __ret_114; \ }) #else -#define vmull_lane_u32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ - uint64x2_t __ret_107; \ - uint32x2_t __s0_107 = __p0_107; \ - uint32x2_t __s1_107 = __p1_107; \ - uint32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ - uint32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ - __ret_107 = __noswap_vmull_u32(__rev0_107, __noswap_splat_lane_u32(__rev1_107, __p2_107)); \ - __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \ - __ret_107; \ +#define vmull_lane_u32(__p0_115, __p1_115, __p2_115) __extension__ ({ \ + uint64x2_t __ret_115; \ + uint32x2_t __s0_115 = __p0_115; \ + uint32x2_t __s1_115 = __p1_115; \ + uint32x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ + uint32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ + __ret_115 = __noswap_vmull_u32(__rev0_115, __noswap_splat_lane_u32(__rev1_115, __p2_115)); \ + __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ + __ret_115; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ - uint32x4_t __ret_108; \ - uint16x4_t __s0_108 = __p0_108; \ - uint16x4_t __s1_108 = __p1_108; \ - __ret_108 = vmull_u16(__s0_108, splat_lane_u16(__s1_108, __p2_108)); \ - __ret_108; \ +#define vmull_lane_u16(__p0_116, __p1_116, __p2_116) __extension__ ({ \ + uint32x4_t __ret_116; \ + uint16x4_t __s0_116 = __p0_116; \ + uint16x4_t __s1_116 = __p1_116; \ + __ret_116 = vmull_u16(__s0_116, splat_lane_u16(__s1_116, __p2_116)); \ + __ret_116; \ }) #else -#define vmull_lane_u16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ - uint32x4_t __ret_109; \ - uint16x4_t __s0_109 = __p0_109; \ - uint16x4_t __s1_109 = __p1_109; \ - uint16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ - uint16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ - __ret_109 = __noswap_vmull_u16(__rev0_109, __noswap_splat_lane_u16(__rev1_109, __p2_109)); \ - __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \ - __ret_109; \ +#define vmull_lane_u16(__p0_117, __p1_117, __p2_117) __extension__ ({ \ + uint32x4_t __ret_117; \ + uint16x4_t __s0_117 = __p0_117; \ + uint16x4_t __s1_117 = __p1_117; \ + uint16x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ + uint16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ + __ret_117 = __noswap_vmull_u16(__rev0_117, __noswap_splat_lane_u16(__rev1_117, __p2_117)); \ + __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ + __ret_117; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ - int64x2_t __ret_110; \ - int32x2_t __s0_110 = __p0_110; \ - int32x2_t __s1_110 = __p1_110; \ - __ret_110 = vmull_s32(__s0_110, splat_lane_s32(__s1_110, __p2_110)); \ - __ret_110; \ +#define vmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \ + int64x2_t __ret_118; \ + int32x2_t __s0_118 = __p0_118; \ + int32x2_t __s1_118 = __p1_118; \ + __ret_118 = vmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \ + __ret_118; \ }) #else -#define vmull_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ - int64x2_t __ret_111; \ - int32x2_t 
__s0_111 = __p0_111; \ - int32x2_t __s1_111 = __p1_111; \ - int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ - int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ - __ret_111 = __noswap_vmull_s32(__rev0_111, __noswap_splat_lane_s32(__rev1_111, __p2_111)); \ - __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ - __ret_111; \ +#define vmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \ + int64x2_t __ret_119; \ + int32x2_t __s0_119 = __p0_119; \ + int32x2_t __s1_119 = __p1_119; \ + int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ + int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ + __ret_119 = __noswap_vmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \ + __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ + __ret_119; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ - int32x4_t __ret_112; \ - int16x4_t __s0_112 = __p0_112; \ - int16x4_t __s1_112 = __p1_112; \ - __ret_112 = vmull_s16(__s0_112, splat_lane_s16(__s1_112, __p2_112)); \ - __ret_112; \ +#define vmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \ + int32x4_t __ret_120; \ + int16x4_t __s0_120 = __p0_120; \ + int16x4_t __s1_120 = __p1_120; \ + __ret_120 = vmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \ + __ret_120; \ }) #else -#define vmull_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ - int32x4_t __ret_113; \ - int16x4_t __s0_113 = __p0_113; \ - int16x4_t __s1_113 = __p1_113; \ - int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ - int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ - __ret_113 = __noswap_vmull_s16(__rev0_113, __noswap_splat_lane_s16(__rev1_113, __p2_113)); \ - __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ - __ret_113; \ +#define vmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \ + int32x4_t __ret_121; \ + int16x4_t __s0_121 = __p0_121; \ + int16x4_t __s1_121 = __p1_121; \ + int16x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ + int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ + __ret_121 = __noswap_vmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \ + __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ + __ret_121; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); return __ret; } #else -__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); return __ret; @@ -16940,20 +20201,20 @@ __ai 
uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { uint32x4_t __ret; __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { uint32x4_t __ret; __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); return __ret; @@ -16961,20 +20222,20 @@ __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; @@ -16982,20 +20243,20 @@ __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; @@ -17003,13 +20264,13 @@ __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = ~__p0; return __ret; } #else -__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17019,13 +20280,13 @@ __ai poly8x8_t vmvn_p8(poly8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = ~__p0; return __ret; } #else -__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17035,13 +20296,13 @@ __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = ~__p0; return __ret; } #else -__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17051,13 +20312,13 @@ __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = ~__p0; return __ret; } #else -__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; @@ -17067,13 +20328,13 @@ __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = ~__p0; return __ret; } #else -__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17083,13 +20344,13 @@ __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmvnq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = ~__p0; return __ret; } #else -__ai int8x16_t vmvnq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17099,13 +20360,13 @@ __ai int8x16_t vmvnq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmvnq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = ~__p0; return __ret; } #else -__ai int32x4_t vmvnq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; @@ -17115,13 +20376,13 @@ __ai int32x4_t vmvnq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmvnq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = ~__p0; return __ret; } 
#else -__ai int16x8_t vmvnq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17131,13 +20392,13 @@ __ai int16x8_t vmvnq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = ~__p0; return __ret; } #else -__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17147,13 +20408,13 @@ __ai uint8x8_t vmvn_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = ~__p0; return __ret; } #else -__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = ~__rev0; @@ -17163,13 +20424,13 @@ __ai uint32x2_t vmvn_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = ~__p0; return __ret; } #else -__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; @@ -17179,13 +20440,13 @@ __ai uint16x4_t vmvn_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmvn_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) { int8x8_t __ret; __ret = ~__p0; return __ret; } #else -__ai int8x8_t vmvn_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = ~__rev0; @@ -17195,13 +20456,13 @@ __ai int8x8_t vmvn_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmvn_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) { int32x2_t __ret; __ret = ~__p0; return __ret; } #else -__ai int32x2_t vmvn_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = ~__rev0; @@ -17211,13 +20472,13 @@ __ai int32x2_t vmvn_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmvn_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) { int16x4_t __ret; __ret = ~__p0; return __ret; } #else -__ai int16x4_t vmvn_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = ~__rev0; @@ -17227,13 +20488,13 @@ __ai int16x4_t vmvn_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vnegq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = -__p0; return __ret; } #else -__ai int8x16_t 
vnegq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; @@ -17243,13 +20504,13 @@ __ai int8x16_t vnegq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vnegq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = -__p0; return __ret; } #else -__ai float32x4_t vnegq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; @@ -17259,13 +20520,13 @@ __ai float32x4_t vnegq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = -__p0; return __ret; } #else -__ai int32x4_t vnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; @@ -17275,13 +20536,13 @@ __ai int32x4_t vnegq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vnegq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = -__p0; return __ret; } #else -__ai int16x8_t vnegq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; @@ -17291,13 +20552,13 @@ __ai int16x8_t vnegq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vneg_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) { int8x8_t __ret; __ret = -__p0; return __ret; } #else -__ai int8x8_t vneg_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = -__rev0; @@ -17307,13 +20568,13 @@ __ai int8x8_t vneg_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vneg_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) { float32x2_t __ret; __ret = -__p0; return __ret; } #else -__ai float32x2_t vneg_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; @@ -17323,13 +20584,13 @@ __ai float32x2_t vneg_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vneg_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) { int32x2_t __ret; __ret = -__p0; return __ret; } #else -__ai int32x2_t vneg_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; @@ -17339,13 +20600,13 @@ __ai int32x2_t vneg_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vneg_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) { int16x4_t __ret; 
__ret = -__p0; return __ret; } #else -__ai int16x4_t vneg_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = -__rev0; @@ -17355,13 +20616,13 @@ __ai int16x4_t vneg_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17372,13 +20633,13 @@ __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17389,13 +20650,13 @@ __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17406,13 +20667,13 @@ __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17423,13 +20684,13 @@ __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17440,13 +20701,13 @@ __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17457,13 +20718,13 @@ __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17474,13 +20735,13 @@ __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17491,13 +20752,13 @@ __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17508,13 +20769,13 @@ __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17524,19 +20785,19 @@ __ai uint32x2_t vorn_u32(uint32x2_t 
__p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 | ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17547,13 +20808,13 @@ __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17564,13 +20825,13 @@ __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17580,19 +20841,19 @@ __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 | ~__p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 | ~__p1; return __ret; } #else -__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17603,13 +20864,13 @@ __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17620,13 +20881,13 @@ __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17637,13 +20898,13 @@ __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17654,13 +20915,13 @@ __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17671,13 +20932,13 @@ __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17688,13 +20949,13 @@ __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ 
-17705,13 +20966,13 @@ __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17722,13 +20983,13 @@ __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17739,13 +21000,13 @@ __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17756,13 +21017,13 @@ __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17772,19 +21033,19 @@ __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = __p0 | __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17795,13 +21056,13 @@ __ai uint16x4_t 
vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17812,13 +21073,13 @@ __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -17828,19 +21089,19 @@ __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = __p0 | __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __p0 | __p1; return __ret; } #else -__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17851,13 +21112,13 @@ __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17868,13 +21129,13 @@ __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17885,13 +21146,13 @@ __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17902,13 +21163,13 @@ __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17919,13 +21180,13 @@ __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -17936,13 +21197,13 @@ __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17953,13 +21214,13 @@ __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t 
vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -17970,13 +21231,13 @@ __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #else -__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { uint64x1_t __ret; uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); @@ -17985,13 +21246,13 @@ __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18002,13 +21263,13 @@ __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18019,13 +21280,13 @@ __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #else -__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { int64x1_t __ret; int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); @@ -18034,13 +21295,13 @@ __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vpadal_s16(int32x2_t 
__p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18051,13 +21312,13 @@ __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18068,13 +21329,13 @@ __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18085,13 +21346,13 @@ __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18102,13 +21363,13 @@ __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18119,13 +21380,13 @@ __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return 
__ret; } #else -__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18136,13 +21397,13 @@ __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18153,13 +21414,13 @@ __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18170,13 +21431,13 @@ __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); @@ -18186,13 +21447,13 @@ __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); @@ -18202,13 +21463,13 @@ __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) { uint32x4_t 
__ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); @@ -18218,13 +21479,13 @@ __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); @@ -18234,13 +21495,13 @@ __ai int16x8_t vpaddlq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); @@ -18250,13 +21511,13 @@ __ai int64x2_t vpaddlq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); @@ -18266,13 +21527,13 @@ __ai int32x4_t vpaddlq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) { uint16x4_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); @@ -18282,13 +21543,13 @@ __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); return __ret; } #else -__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) { uint64x1_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); @@ -18297,13 +21558,13 @@ __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) { 
uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) { uint32x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); @@ -18313,13 +21574,13 @@ __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpaddl_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vpaddl_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) { int16x4_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); @@ -18329,13 +21590,13 @@ __ai int16x4_t vpaddl_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vpaddl_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); return __ret; } #else -__ai int64x1_t vpaddl_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) { int64x1_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); @@ -18344,13 +21605,13 @@ __ai int64x1_t vpaddl_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpaddl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vpaddl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) { int32x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); @@ -18360,13 +21621,13 @@ __ai int32x2_t vpaddl_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18377,13 +21638,13 @@ __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18394,13 +21655,13 @@ __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18411,13 +21672,13 @@ __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18428,13 +21689,13 @@ __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18445,13 +21706,13 @@ __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18462,13 +21723,13 @@ __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18479,13 +21740,13 @@ __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18496,13 +21757,13 @@ __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18513,13 +21774,13 @@ __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18530,13 +21791,13 @@ __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18547,13 +21808,13 @@ __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t 
vpmin_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18564,13 +21825,13 @@ __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18581,13 +21842,13 @@ __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18598,13 +21859,13 @@ __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqabsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vqabsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); @@ -18614,13 +21875,13 @@ __ai int8x16_t vqabsq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqabsq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vqabsq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); @@ -18630,13 +21891,13 @@ __ai int32x4_t vqabsq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqabsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vqabsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); @@ -18646,13 +21907,13 @@ __ai 
int16x8_t vqabsq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqabs_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vqabs_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); @@ -18662,13 +21923,13 @@ __ai int8x8_t vqabs_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqabs_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vqabs_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); @@ -18678,13 +21939,13 @@ __ai int32x2_t vqabs_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqabs_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vqabs_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); @@ -18694,13 +21955,13 @@ __ai int16x4_t vqabs_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18711,13 +21972,13 @@ __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18728,13 +21989,13 @@ __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, 
uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18745,13 +22006,13 @@ __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18762,13 +22023,13 @@ __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18779,13 +22040,13 @@ __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18796,13 +22057,13 @@ __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18813,13 +22074,13 @@ __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef 
__LITTLE_ENDIAN__ -__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18830,13 +22091,13 @@ __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18847,13 +22108,13 @@ __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18863,19 +22124,19 @@ __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { } #endif -__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18886,13 +22147,13 @@ __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai 
__attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -18903,13 +22164,13 @@ __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18919,19 +22180,19 @@ __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18942,13 +22203,13 @@ __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #else -__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -18957,7 +22218,7 @@ __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; @@ -18965,13 +22226,13 @@ __ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t 
__p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #else -__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -18980,7 +22241,7 @@ __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; @@ -18988,61 +22249,61 @@ __ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __ #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ - int64x2_t __ret_114; \ - int64x2_t __s0_114 = __p0_114; \ - int32x2_t __s1_114 = __p1_114; \ - int32x2_t __s2_114 = __p2_114; \ - __ret_114 = vqdmlal_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ - __ret_114; \ +#define vqdmlal_lane_s32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \ + int64x2_t __ret_122; \ + int64x2_t __s0_122 = __p0_122; \ + int32x2_t __s1_122 = __p1_122; \ + int32x2_t __s2_122 = __p2_122; \ + __ret_122 = vqdmlal_s32(__s0_122, __s1_122, splat_lane_s32(__s2_122, __p3_122)); \ + __ret_122; \ }) #else -#define vqdmlal_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ - int64x2_t __ret_115; \ - int64x2_t __s0_115 = __p0_115; \ - int32x2_t __s1_115 = __p1_115; \ - int32x2_t __s2_115 = __p2_115; \ - int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ - int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ - int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ - __ret_115 = __noswap_vqdmlal_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ - __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ - __ret_115; \ +#define vqdmlal_lane_s32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \ + int64x2_t __ret_123; \ + int64x2_t __s0_123 = __p0_123; \ + int32x2_t __s1_123 = __p1_123; \ + int32x2_t __s2_123 = __p2_123; \ + int64x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ + int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \ + int32x2_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 1, 0); \ + __ret_123 = __noswap_vqdmlal_s32(__rev0_123, __rev1_123, __noswap_splat_lane_s32(__rev2_123, __p3_123)); \ + __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \ + __ret_123; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ - int32x4_t __ret_116; \ - int32x4_t __s0_116 = __p0_116; \ - int16x4_t __s1_116 = __p1_116; \ - int16x4_t __s2_116 = __p2_116; \ - __ret_116 = vqdmlal_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ - __ret_116; \ +#define vqdmlal_lane_s16(__p0_124, __p1_124, __p2_124, 
__p3_124) __extension__ ({ \ + int32x4_t __ret_124; \ + int32x4_t __s0_124 = __p0_124; \ + int16x4_t __s1_124 = __p1_124; \ + int16x4_t __s2_124 = __p2_124; \ + __ret_124 = vqdmlal_s16(__s0_124, __s1_124, splat_lane_s16(__s2_124, __p3_124)); \ + __ret_124; \ }) #else -#define vqdmlal_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ - int32x4_t __ret_117; \ - int32x4_t __s0_117 = __p0_117; \ - int16x4_t __s1_117 = __p1_117; \ - int16x4_t __s2_117 = __p2_117; \ - int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ - int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ - int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ - __ret_117 = __noswap_vqdmlal_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ - __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ - __ret_117; \ +#define vqdmlal_lane_s16(__p0_125, __p1_125, __p2_125, __p3_125) __extension__ ({ \ + int32x4_t __ret_125; \ + int32x4_t __s0_125 = __p0_125; \ + int16x4_t __s1_125 = __p1_125; \ + int16x4_t __s2_125 = __p2_125; \ + int32x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ + int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ + int16x4_t __rev2_125; __rev2_125 = __builtin_shufflevector(__s2_125, __s2_125, 3, 2, 1, 0); \ + __ret_125 = __noswap_vqdmlal_s16(__rev0_125, __rev1_125, __noswap_splat_lane_s16(__rev2_125, __p3_125)); \ + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ + __ret_125; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19050,7 +22311,7 @@ __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; @@ -19058,13 +22319,13 @@ __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19072,7 +22333,7 @@ __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -19080,13 +22341,13 @@ __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #else -__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19095,7 +22356,7 @@ __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; @@ -19103,13 +22364,13 @@ __ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #else -__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19118,7 +22379,7 @@ __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; @@ -19126,61 +22387,61 @@ __ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __ #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \ - int64x2_t __ret_118; \ - int64x2_t __s0_118 = __p0_118; \ - int32x2_t __s1_118 = __p1_118; \ - int32x2_t __s2_118 = __p2_118; \ 
- __ret_118 = vqdmlsl_s32(__s0_118, __s1_118, splat_lane_s32(__s2_118, __p3_118)); \ - __ret_118; \ +#define vqdmlsl_lane_s32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ + int64x2_t __ret_126; \ + int64x2_t __s0_126 = __p0_126; \ + int32x2_t __s1_126 = __p1_126; \ + int32x2_t __s2_126 = __p2_126; \ + __ret_126 = vqdmlsl_s32(__s0_126, __s1_126, splat_lane_s32(__s2_126, __p3_126)); \ + __ret_126; \ }) #else -#define vqdmlsl_lane_s32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \ - int64x2_t __ret_119; \ - int64x2_t __s0_119 = __p0_119; \ - int32x2_t __s1_119 = __p1_119; \ - int32x2_t __s2_119 = __p2_119; \ - int64x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ - int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ - int32x2_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 1, 0); \ - __ret_119 = __noswap_vqdmlsl_s32(__rev0_119, __rev1_119, __noswap_splat_lane_s32(__rev2_119, __p3_119)); \ - __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ - __ret_119; \ +#define vqdmlsl_lane_s32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ + int64x2_t __ret_127; \ + int64x2_t __s0_127 = __p0_127; \ + int32x2_t __s1_127 = __p1_127; \ + int32x2_t __s2_127 = __p2_127; \ + int64x2_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 1, 0); \ + int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ + int32x2_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 1, 0); \ + __ret_127 = __noswap_vqdmlsl_s32(__rev0_127, __rev1_127, __noswap_splat_lane_s32(__rev2_127, __p3_127)); \ + __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 1, 0); \ + __ret_127; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s16(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \ - int32x4_t __ret_120; \ - int32x4_t __s0_120 = __p0_120; \ - int16x4_t __s1_120 = __p1_120; \ - int16x4_t __s2_120 = __p2_120; \ - __ret_120 = vqdmlsl_s16(__s0_120, __s1_120, splat_lane_s16(__s2_120, __p3_120)); \ - __ret_120; \ +#define vqdmlsl_lane_s16(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \ + int32x4_t __ret_128; \ + int32x4_t __s0_128 = __p0_128; \ + int16x4_t __s1_128 = __p1_128; \ + int16x4_t __s2_128 = __p2_128; \ + __ret_128 = vqdmlsl_s16(__s0_128, __s1_128, splat_lane_s16(__s2_128, __p3_128)); \ + __ret_128; \ }) #else -#define vqdmlsl_lane_s16(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \ - int32x4_t __ret_121; \ - int32x4_t __s0_121 = __p0_121; \ - int16x4_t __s1_121 = __p1_121; \ - int16x4_t __s2_121 = __p2_121; \ - int32x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ - int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ - int16x4_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 3, 2, 1, 0); \ - __ret_121 = __noswap_vqdmlsl_s16(__rev0_121, __rev1_121, __noswap_splat_lane_s16(__rev2_121, __p3_121)); \ - __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ - __ret_121; \ +#define vqdmlsl_lane_s16(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ + int32x4_t __ret_129; \ + int32x4_t __s0_129 = __p0_129; \ + int16x4_t __s1_129 = __p1_129; \ + int16x4_t __s2_129 = __p2_129; \ + int32x4_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 3, 2, 1, 0); \ + int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 
1, 0); \ + int16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ + __ret_129 = __noswap_vqdmlsl_s16(__rev0_129, __rev1_129, __noswap_splat_lane_s16(__rev2_129, __p3_129)); \ + __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 3, 2, 1, 0); \ + __ret_129; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19188,7 +22449,7 @@ __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; @@ -19196,13 +22457,13 @@ __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19210,7 +22471,7 @@ __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -19218,13 +22479,13 @@ __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19232,7 +22493,7 @@ __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, 
int32x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; @@ -19240,13 +22501,13 @@ __ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -19254,7 +22515,7 @@ __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; @@ -19262,13 +22523,13 @@ __ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19276,7 +22537,7 @@ __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; @@ -19284,13 +22545,13 @@ __ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19298,7 +22559,7 @@ __ai int16x4_t 
vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; @@ -19306,13 +22567,13 @@ __ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); @@ -19322,13 +22583,13 @@ __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); return __ret; } #else -__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); @@ -19338,13 +22599,13 @@ __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); @@ -19354,13 +22615,13 @@ __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); @@ -19370,13 +22631,13 @@ __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { 
int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #else -__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19384,7 +22645,7 @@ __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; @@ -19392,13 +22653,13 @@ __ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19406,7 +22667,7 @@ __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; @@ -19414,62 +22675,62 @@ __ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s32(__p0_122, __p1_122, __p2_122) __extension__ ({ \ - int64x2_t __ret_122; \ - int32x2_t __s0_122 = __p0_122; \ - int32x2_t __s1_122 = __p1_122; \ - __ret_122 = vqdmull_s32(__s0_122, splat_lane_s32(__s1_122, __p2_122)); \ - __ret_122; \ +#define vqdmull_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \ + int64x2_t __ret_130; \ + int32x2_t __s0_130 = __p0_130; \ + int32x2_t __s1_130 = __p1_130; \ + __ret_130 = vqdmull_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \ + __ret_130; \ }) #else -#define vqdmull_lane_s32(__p0_123, __p1_123, __p2_123) __extension__ ({ \ - int64x2_t __ret_123; \ - int32x2_t __s0_123 = __p0_123; \ - int32x2_t __s1_123 = __p1_123; \ - int32x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ - int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \ - __ret_123 = __noswap_vqdmull_s32(__rev0_123, __noswap_splat_lane_s32(__rev1_123, __p2_123)); \ - __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \ - __ret_123; \ +#define vqdmull_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \ + int64x2_t __ret_131; \ + int32x2_t __s0_131 = __p0_131; \ + int32x2_t __s1_131 = __p1_131; \ + int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \ + int32x2_t __rev1_131; __rev1_131 = 
__builtin_shufflevector(__s1_131, __s1_131, 1, 0); \ + __ret_131 = __noswap_vqdmull_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \ + __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \ + __ret_131; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s16(__p0_124, __p1_124, __p2_124) __extension__ ({ \ - int32x4_t __ret_124; \ - int16x4_t __s0_124 = __p0_124; \ - int16x4_t __s1_124 = __p1_124; \ - __ret_124 = vqdmull_s16(__s0_124, splat_lane_s16(__s1_124, __p2_124)); \ - __ret_124; \ +#define vqdmull_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \ + int32x4_t __ret_132; \ + int16x4_t __s0_132 = __p0_132; \ + int16x4_t __s1_132 = __p1_132; \ + __ret_132 = vqdmull_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \ + __ret_132; \ }) #else -#define vqdmull_lane_s16(__p0_125, __p1_125, __p2_125) __extension__ ({ \ - int32x4_t __ret_125; \ - int16x4_t __s0_125 = __p0_125; \ - int16x4_t __s1_125 = __p1_125; \ - int16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ - int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ - __ret_125 = __noswap_vqdmull_s16(__rev0_125, __noswap_splat_lane_s16(__rev1_125, __p2_125)); \ - __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ - __ret_125; \ +#define vqdmull_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \ + int32x4_t __ret_133; \ + int16x4_t __s0_133 = __p0_133; \ + int16x4_t __s1_133 = __p1_133; \ + int16x4_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \ + int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ + __ret_133 = __noswap_vqdmull_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \ + __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \ + __ret_133; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { int64x2_t __ret; __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; @@ -19477,20 +22738,20 @@ __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; @@ -19498,20 +22759,20 @@ __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); return __ret; @@ -19519,20 +22780,20 @@ __ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); return __ret; @@ -19540,20 +22801,20 @@ __ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); return __ret; @@ -19561,20 +22822,20 @@ __ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqmovn_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); return __ret; } 
#else -__ai int16x4_t vqmovn_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); return __ret; @@ -19582,20 +22843,20 @@ __ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); return __ret; } #else -__ai int32x2_t vqmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); return __ret; @@ -19603,20 +22864,20 @@ __ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqmovn_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); return __ret; } #else -__ai int8x8_t vqmovn_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); return __ret; @@ -19624,20 +22885,20 @@ __ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) { uint16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); return __ret; @@ -19645,20 +22906,20 @@ __ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t 
vqmovun_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) { uint32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); return __ret; @@ -19666,20 +22927,20 @@ __ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); return __ret; @@ -19687,13 +22948,13 @@ __ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqnegq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vqnegq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); @@ -19703,13 +22964,13 @@ __ai int8x16_t vqnegq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vqnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); @@ -19719,13 +22980,13 @@ __ai int32x4_t vqnegq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqnegq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vqnegq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); @@ -19735,13 +22996,13 @@ __ai int16x8_t vqnegq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqneg_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vqneg_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); @@ -19751,13 +23012,13 @@ __ai int8x8_t vqneg_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqneg_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vqneg_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); @@ -19767,13 +23028,13 @@ __ai int32x2_t vqneg_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqneg_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vqneg_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); @@ -19783,13 +23044,13 @@ __ai int16x4_t vqneg_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19797,7 +23058,7 @@ __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; @@ -19805,13 +23066,13 @@ __ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, 
int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -19819,7 +23080,7 @@ __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; @@ -19827,13 +23088,13 @@ __ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19841,7 +23102,7 @@ __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; @@ -19849,13 +23110,13 @@ __ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19863,7 +23124,7 @@ __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; @@ -19871,13 +23132,13 @@ __ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int32x4_t 
vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); @@ -19887,13 +23148,13 @@ __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); return __ret; } #else -__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); @@ -19903,13 +23164,13 @@ __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); @@ -19919,13 +23180,13 @@ __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); @@ -19935,13 +23196,13 @@ __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -19952,13 +23213,13 @@ __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t 
vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -19969,13 +23230,13 @@ __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -19986,13 +23247,13 @@ __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -20003,13 +23264,13 @@ __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -20020,13 +23281,13 @@ __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -20037,13 +23298,13 @@ __ai 
int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -20054,13 +23315,13 @@ __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -20071,13 +23332,13 @@ __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -20088,13 +23349,13 @@ __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -20104,19 +23365,19 @@ __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { } #endif -__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai 
uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -20127,13 +23388,13 @@ __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -20144,13 +23405,13 @@ __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -20160,19 +23421,19 @@ __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { } #endif -__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -20399,13 +23660,13 @@ __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -20416,13 +23677,13 
@@ __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
 }
 #else
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
[Hunks @@ -20433,13 +23694,13 @@ through @@ -22297,13 +25558,13 @@ apply the identical two-line change (inserting __attribute__((target("neon"))) between __ai and the return type) to the little- and big-endian variants of every remaining vqshlq_*/vqshl_*, vqsubq_*/vqsub_*, vraddhn_* (and __noswap_vraddhn_*), vrecpe*/vrecps*, vrev16*, vrev32*, and vrev64* intrinsic.]
2, 1, 0); return __ret; } #else -__ai int8x8_t vrev64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); @@ -22265,13 +25526,13 @@ __ai int8x8_t vrev64_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrev64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0); return __ret; } #else -__ai float32x2_t vrev64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); @@ -22281,13 +25542,13 @@ __ai float32x2_t vrev64_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrev64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1, 0); return __ret; } #else -__ai int32x2_t vrev64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); @@ -22297,13 +25558,13 @@ __ai int32x2_t vrev64_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrev64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); return __ret; } #else -__ai int16x4_t vrev64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); @@ -22313,13 +25574,45 @@ __ai int16x4_t vrev64_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) 
__builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -22330,13 +25623,13 @@ __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -22347,13 +25640,13 @@ __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -22364,13 +25657,13 @@ __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -22381,13 +25674,13 @@ __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -22398,13 
+25691,13 @@ __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -22415,13 +25708,13 @@ __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -22432,13 +25725,13 @@ __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -22449,13 +25742,13 @@ __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -22466,13 +25759,13 @@ __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 
   int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22483,13 +25776,13 @@ __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
   return __ret;
 }
 #else
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -22500,13 +25793,13 @@ __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
   return __ret;
 }
 #else
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -22517,13 +25810,13 @@ __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
 }
 #else
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22534,13 +25827,13 @@ __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
 }
 #else
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -22551,13 +25844,13 @@ __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
   uint64x2_t __ret;
   __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
   return __ret;
 }
 #else
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
   uint64x2_t __ret;
   uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -22568,13 +25861,13 @@ __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
   uint16x8_t __ret;
   __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
   return __ret;
 }
 #else
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
   uint16x8_t __ret;
   uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22585,13 +25878,13 @@ __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
   int8x16_t __ret;
   __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
   return __ret;
 }
 #else
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
   int8x16_t __ret;
   int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22602,13 +25895,13 @@ __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
   int32x4_t __ret;
   __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
   return __ret;
 }
 #else
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
   int32x4_t __ret;
   int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -22619,13 +25912,13 @@ __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
   int64x2_t __ret;
   __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
   return __ret;
 }
 #else
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
   int64x2_t __ret;
   int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -22636,13 +25929,13 @@ __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
   int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
   return __ret;
 }
 #else
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
   int16x8_t __ret;
   int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22653,13 +25946,13 @@ __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
   uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
   return __ret;
 }
 #else
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
   uint8x8_t __ret;
   uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22670,13 +25963,13 @@ __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
   return __ret;
 }
 #else
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
   uint32x2_t __ret;
   uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -22686,19 +25979,19 @@ __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
 }
 #endif

-__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+__ai __attribute__((target("neon"))) uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
   uint64x1_t __ret;
   __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
   uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
   return __ret;
 }
 #else
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
   uint16x4_t __ret;
   uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -22709,13 +26002,13 @@ __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
   int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
   return __ret;
 }
 #else
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
   int8x8_t __ret;
   int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -22726,13 +26019,13 @@ __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
   return __ret;
 }
 #else
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -22742,19 +26035,19 @@ __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
 }
 #endif

-__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+__ai __attribute__((target("neon"))) int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
   int64x1_t __ret;
   __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
   return __ret;
 }
 #else
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -23173,13 +26466,13 @@ __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
   return __ret;
 }
 #else
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
@@ -23189,13 +26482,13 @@ __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) {
   float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
   return __ret;
 }
 #else
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) {
   float32x4_t __ret;
   float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
@@ -23205,13 +26498,13 @@ __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
   return __ret;
 }
 #else
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
   uint32x2_t __ret;
   uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
@@ -23221,13 +26514,13 @@ __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) {
   float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
   return __ret;
 }
 #else
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) {
   float32x2_t __ret;
   float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
@@ -23237,13 +26530,13 @@ __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
   float32x4_t __ret;
   __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
   return __ret;
 }
 #else
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
   float32x4_t __ret;
   float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -23254,13 +26547,13 @@ __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
   float32x2_t __ret;
   __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
   return __ret;
 }
 #else
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
   float32x2_t __ret;
   float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -23579,13 +26872,13 @@ __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
   return __ret;
 }
 #else
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint16x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -23593,7 +26886,7 @@ __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
   return __ret;
@@ -23601,13 +26894,13 @@ __ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
   return __ret;
 }
 #else
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint32x2_t __ret;
   uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -23615,7 +26908,7 @@ __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
   return __ret;
@@ -23623,13 +26916,13 @@ __ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
   return __ret;
 }
 #else
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint8x8_t __ret;
   uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -23637,7 +26930,7 @@ __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
   return __ret;
@@ -23645,13 +26938,13 @@ __ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
   return __ret;
 }
 #else
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   int16x4_t __ret;
   int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -23659,7 +26952,7 @@ __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
   return __ret;
@@ -23667,13 +26960,13 @@ __ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
   return __ret;
 }
 #else
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   int32x2_t __ret;
   int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -23681,7 +26974,7 @@ __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
   return __ret;
@@ -23689,13 +26982,13 @@ __ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
   return __ret;
 }
 #else
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   int8x8_t __ret;
   int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -23703,7 +26996,7 @@ __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
   return __ret;
@@ -24265,13 +27558,13 @@ __ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
 }
 #else
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -24282,13 +27575,13 @@ __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
 }
 #else
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
__attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -24299,13 +27592,13 @@ __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -24316,13 +27609,13 @@ __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -24333,13 +27626,13 @@ __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -24350,13 +27643,13 @@ __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -24367,13 +27660,13 @@ __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) 
   __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
   return __ret;
 }
 #else
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
   int64x2_t __ret;
   int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -24384,13 +27677,13 @@ __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
   int16x8_t __ret;
   __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
   return __ret;
 }
 #else
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
   int16x8_t __ret;
   int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -24401,13 +27694,13 @@ __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
   uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
   return __ret;
 }
 #else
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
   uint8x8_t __ret;
   uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -24418,13 +27711,13 @@ __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
   return __ret;
 }
 #else
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
   uint32x2_t __ret;
   uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -24434,19 +27727,19 @@ __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
 }
 #endif

-__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+__ai __attribute__((target("neon"))) uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
   uint64x1_t __ret;
   __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
   uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
   return __ret;
 }
 #else
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
   uint16x4_t __ret;
   uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -24457,13 +27750,13 @@ __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
   int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
   return __ret;
 }
 #else
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
   int8x8_t __ret;
   int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -24474,13 +27767,13 @@ __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
   return __ret;
 }
 #else
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -24490,19 +27783,19 @@ __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
 }
 #endif

-__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
+__ai __attribute__((target("neon"))) int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
   int64x1_t __ret;
   __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
   return __ret;
 }
 #else
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29549,13 +32842,13 @@ __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29566,13 +32859,13 @@ __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29583,13 +32876,13 @@ __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint64x2_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint64x2_t __ret;
   uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29600,13 +32893,13 @@ __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint16x8_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint16x8_t __ret;
   uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29617,13 +32910,13 @@ __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
   int8x16_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
   int8x16_t __ret;
   int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29634,13 +32927,13 @@ __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
   float32x4_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
   float32x4_t __ret;
   float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29651,13 +32944,13 @@ __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
   int32x4_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
   int32x4_t __ret;
   int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29668,13 +32961,13 @@ __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
   int64x2_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
   int64x2_t __ret;
   int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29685,13 +32978,13 @@ __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
   int16x8_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
   int16x8_t __ret;
   int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29702,13 +32995,13 @@ __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
   uint8x8_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
   uint8x8_t __ret;
   uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29719,13 +33012,13 @@ __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
   uint32x2_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
   uint32x2_t __ret;
   uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29735,19 +33028,19 @@ __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
 }
 #endif

-__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+__ai __attribute__((target("neon"))) uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
   uint64x1_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
   uint16x4_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
   uint16x4_t __ret;
   uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29758,13 +33051,13 @@ __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
   int8x8_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
   int8x8_t __ret;
   int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29775,13 +33068,13 @@ __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
   float32x2_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
   float32x2_t __ret;
   float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29792,13 +33085,13 @@ __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
   int32x2_t __ret;
   int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29808,19 +33101,19 @@ __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
 }
 #endif

-__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
+__ai __attribute__((target("neon"))) int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
   int64x1_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   __ret = __p0 - __p1;
   return __ret;
 }
 #else
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
   int16x4_t __ret;
   int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29831,13 +33124,13 @@ __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
   return __ret;
 }
 #else
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint16x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29845,7 +33138,7 @@ __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("neon"))) uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint16x4_t __ret;
   __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
   return __ret;
@@ -29853,13 +33146,13 @@ __ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
   return __ret;
 }
 #else
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint32x2_t __ret;
   uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29867,7 +33160,7 @@ __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+__ai __attribute__((target("neon"))) uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
   uint32x2_t __ret;
   __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
   return __ret;
@@ -29875,13 +33168,13 @@ __ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
   return __ret;
 }
 #else
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint8x8_t __ret;
   uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29889,7 +33182,7 @@ __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+__ai __attribute__((target("neon"))) uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
   uint8x8_t __ret;
   __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
   return __ret;
@@ -29897,13 +33190,13 @@ __ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
   return __ret;
 }
 #else
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   int16x4_t __ret;
   int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -29911,7 +33204,7 @@ __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
-__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+__ai __attribute__((target("neon"))) int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
   int16x4_t __ret;
   __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
   return __ret;
@@ -29919,13 +33212,13 @@ __ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
   return __ret;
 }
 #else
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   int32x2_t __ret;
   int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29933,7 +33226,7 @@ __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
-__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+__ai __attribute__((target("neon"))) int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
   int32x2_t __ret;
   __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
   return __ret;
@@ -29941,13 +33234,13 @@ __ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
   return __ret;
 }
 #else
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   int8x8_t __ret;
   int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29955,7 +33248,7 @@ __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
-__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+__ai __attribute__((target("neon"))) int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
   int8x8_t __ret;
   __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
   return __ret;
@@ -29963,13 +33256,13 @@ __ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
   uint16x8_t __ret;
   __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
   return __ret;
 }
 #else
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
   uint16x8_t __ret;
   uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -29980,13 +33273,13 @@ __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
   uint64x2_t __ret;
   __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
   return __ret;
 }
 #else
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
   uint64x2_t __ret;
   uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -29997,13 +33290,13 @@ __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
   uint32x4_t __ret;
   __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
   return __ret;
 }
 #else
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
   uint32x4_t __ret;
   uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -30014,13 +33307,13 @@ __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
   int16x8_t __ret;
   __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
   return __ret;
 }
 #else
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
   int16x8_t __ret;
   int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30031,13 +33324,13 @@ __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
   int64x2_t __ret;
   __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
   return __ret;
 }
 #else
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
   int64x2_t __ret;
   int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -30048,13 +33341,13 @@ __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
   int32x4_t __ret;
   __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
   return __ret;
 }
 #else
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
   int32x4_t __ret;
   int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -30065,13 +33358,13 @@ __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
   uint16x8_t __ret;
   __ret = __p0 - vmovl_u8(__p1);
   return __ret;
 }
 #else
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
   uint16x8_t __ret;
   uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30082,13 +33375,13 @@ __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
   uint64x2_t __ret;
   __ret = __p0 - vmovl_u32(__p1);
   return __ret;
 }
 #else
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
   uint64x2_t __ret;
   uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -30099,13 +33392,13 @@ __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
   uint32x4_t __ret;
   __ret = __p0 - vmovl_u16(__p1);
   return __ret;
 }
 #else
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -30116,13 +33409,13 @@ __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
   int16x8_t __ret;
   __ret = __p0 - vmovl_s8(__p1);
   return __ret;
 }
 #else
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
   int16x8_t __ret;
   int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -30133,13 +33426,13 @@ __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
   int64x2_t __ret;
   __ret = __p0 - vmovl_s32(__p1);
   return __ret;
 }
 #else
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
   int64x2_t __ret;
   int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -30150,13 +33443,13 @@ __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
   int32x4_t __ret;
   __ret = __p0 - vmovl_s16(__p1);
__ret; } #else -__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30167,13 +33460,13 @@ __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30184,13 +33477,13 @@ __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30201,13 +33494,13 @@ __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30218,13 +33511,13 @@ __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30237,13 +33530,13 @@ __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], 
(int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30256,13 +33549,13 @@ __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30275,13 +33568,13 @@ __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30295,13 +33588,13 @@ __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30315,13 +33608,13 @@ __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30335,13 +33628,13 @@ __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], 
(int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x8x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30356,13 +33649,13 @@ __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30377,13 +33670,13 @@ __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); @@ -30398,13 +33691,13 @@ __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30416,13 +33709,13 @@ __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30434,13 +33727,13 @@ __ai uint8x8_t 
vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30452,13 +33745,13 @@ __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8x2_t __rev1; @@ -30472,13 +33765,13 @@ __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8x2_t __rev1; @@ -30492,13 +33785,13 @@ __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8x2_t __rev1; @@ -30512,13 +33805,13 @@ __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], 
(int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8x3_t __rev1; @@ -30533,13 +33826,13 @@ __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8x3_t __rev1; @@ -30554,13 +33847,13 @@ __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8x3_t __rev1; @@ -30575,13 +33868,13 @@ __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8x4_t __rev1; @@ -30597,13 +33890,13 @@ __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, 
uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8x4_t __rev1; @@ -30619,13 +33912,13 @@ __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8x4_t __rev1; @@ -30641,13 +33934,13 @@ __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30660,13 +33953,13 @@ __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else -__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30679,13 +33972,13 @@ __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30698,13 +33991,13 @@ __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; 
__builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else -__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30717,13 +34010,13 @@ __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30736,13 +34029,13 @@ __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30755,13 +34048,13 @@ __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30774,13 +34067,13 @@ __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ 
-30793,13 +34086,13 @@ __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30812,13 +34105,13 @@ __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30831,13 +34124,13 @@ __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30850,13 +34143,13 @@ __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30869,13 +34162,13 @@ __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -30888,13 +34181,13 @@ __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30907,13 +34200,13 @@ __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -30926,13 +34219,13 @@ __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -30945,13 +34238,13 @@ __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -30964,13 +34257,13 @@ __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 
0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -30983,13 +34276,51 @@ __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { uint8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31000,13 +34331,13 @@ __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { uint16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31017,13 +34348,13 @@ __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; 
} #else -__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { uint8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31034,13 +34365,13 @@ __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { uint16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31051,13 +34382,13 @@ __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31068,13 +34399,13 @@ __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31085,13 +34416,13 @@ __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31102,13 +34433,13 @@ __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t 
__p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31119,13 +34450,13 @@ __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31136,13 +34467,13 @@ __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31153,13 +34484,13 @@ __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31170,13 +34501,13 @@ __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31187,13 +34518,13 @@ __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31204,13 +34535,13 @@ __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31221,13 +34552,13 @@ __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31238,13 +34569,13 @@ __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31255,13 +34586,13 @@ __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31274,13 +34605,13 @@ __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else -__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31293,13 +34624,13 @@ __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31312,13 +34643,13 @@ __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else -__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31331,13 +34662,13 @@ __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31350,13 +34681,13 @@ __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); return 
__ret; } #else -__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31369,13 +34700,13 @@ __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31388,13 +34719,13 @@ __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31407,13 +34738,13 @@ __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31426,13 +34757,13 @@ __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31445,13 +34776,13 @@ __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai 
__attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31464,13 +34795,13 @@ __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31483,13 +34814,13 @@ __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31502,13 +34833,13 @@ __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31521,13 +34852,13 @@ __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31540,13 +34871,13 @@ __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t 
vuzp_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31559,13 +34890,13 @@ __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31578,13 +34909,13 @@ __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31597,13 +34928,51 @@ __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, 
(int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8x2_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31616,13 +34985,13 @@ __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else -__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4x2_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31635,13 +35004,13 @@ __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16x2_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31654,13 +35023,13 @@ __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else -__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8x2_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31673,13 +35042,13 @@ __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { 
+__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16x2_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31692,13 +35061,13 @@ __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31711,13 +35080,13 @@ __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8x2_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31730,13 +35099,13 @@ __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16x2_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31749,13 +35118,13 @@ __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31768,13 +35137,13 @@ __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vzipq_s32(int32x4_t __p0, 
int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31787,13 +35156,13 @@ __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8x2_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31806,13 +35175,13 @@ __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8x2_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31825,13 +35194,13 @@ __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31844,13 +35213,13 @@ __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4x2_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31863,13 +35232,13 @@ __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef 
__LITTLE_ENDIAN__ -__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { int8x8x2_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -31882,13 +35251,13 @@ __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { float32x2x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31901,13 +35270,13 @@ __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { int32x2x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -31920,13 +35289,13 @@ __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { int16x4x2_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -31939,3507 +35308,17 @@ __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#else -#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = 
__p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#else -#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#else -#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#else -#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - 
return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdotq_lane_f32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ - float32x4_t __ret_126; \ - float32x4_t __s0_126 = __p0_126; \ - bfloat16x8_t __s1_126 = __p1_126; \ - bfloat16x4_t __s2_126 = __p2_126; \ -bfloat16x4_t __reint_126 = __s2_126; \ -float32x4_t __reint1_126 = splatq_lane_f32(*(float32x2_t *) &__reint_126, __p3_126); \ - __ret_126 = vbfdotq_f32(__s0_126, __s1_126, *(bfloat16x8_t *) &__reint1_126); \ - __ret_126; \ -}) -#else -#define vbfdotq_lane_f32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ - float32x4_t __ret_127; \ - float32x4_t __s0_127 = __p0_127; \ - bfloat16x8_t __s1_127 = __p1_127; \ - bfloat16x4_t __s2_127 = __p2_127; \ - float32x4_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 3, 2, 1, 0); \ -bfloat16x4_t __reint_127 = __rev2_127; \ -float32x4_t __reint1_127 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_127, __p3_127); \ - __ret_127 = __noswap_vbfdotq_f32(__rev0_127, __rev1_127, *(bfloat16x8_t *) &__reint1_127); \ - __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \ - __ret_127; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdot_lane_f32(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \ - float32x2_t __ret_128; \ - float32x2_t __s0_128 = __p0_128; \ - bfloat16x4_t __s1_128 = __p1_128; \ - bfloat16x4_t __s2_128 = __p2_128; \ -bfloat16x4_t __reint_128 = __s2_128; \ -float32x2_t __reint1_128 = splat_lane_f32(*(float32x2_t *) &__reint_128, __p3_128); \ - __ret_128 = vbfdot_f32(__s0_128, __s1_128, *(bfloat16x4_t *) &__reint1_128); \ - __ret_128; \ -}) -#else -#define vbfdot_lane_f32(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ - float32x2_t __ret_129; \ - float32x2_t __s0_129 = __p0_129; \ - bfloat16x4_t __s1_129 = __p1_129; \ - bfloat16x4_t __s2_129 = __p2_129; \ - float32x2_t __rev0_129; __rev0_129 = 
__builtin_shufflevector(__s0_129, __s0_129, 1, 0); \ - bfloat16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ -bfloat16x4_t __reint_129 = __rev2_129; \ -float32x2_t __reint1_129 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_129, __p3_129); \ - __ret_129 = __noswap_vbfdot_f32(__rev0_129, __rev1_129, *(bfloat16x4_t *) &__reint1_129); \ - __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 1, 0); \ - __ret_129; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdotq_laneq_f32(__p0_130, __p1_130, __p2_130, __p3_130) __extension__ ({ \ - float32x4_t __ret_130; \ - float32x4_t __s0_130 = __p0_130; \ - bfloat16x8_t __s1_130 = __p1_130; \ - bfloat16x8_t __s2_130 = __p2_130; \ -bfloat16x8_t __reint_130 = __s2_130; \ -float32x4_t __reint1_130 = splatq_laneq_f32(*(float32x4_t *) &__reint_130, __p3_130); \ - __ret_130 = vbfdotq_f32(__s0_130, __s1_130, *(bfloat16x8_t *) &__reint1_130); \ - __ret_130; \ -}) -#else -#define vbfdotq_laneq_f32(__p0_131, __p1_131, __p2_131, __p3_131) __extension__ ({ \ - float32x4_t __ret_131; \ - float32x4_t __s0_131 = __p0_131; \ - bfloat16x8_t __s1_131 = __p1_131; \ - bfloat16x8_t __s2_131 = __p2_131; \ - float32x4_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_131; __rev2_131 = __builtin_shufflevector(__s2_131, __s2_131, 7, 6, 5, 4, 3, 2, 1, 0); \ -bfloat16x8_t __reint_131 = __rev2_131; \ -float32x4_t __reint1_131 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_131, __p3_131); \ - __ret_131 = __noswap_vbfdotq_f32(__rev0_131, __rev1_131, *(bfloat16x8_t *) &__reint1_131); \ - __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 3, 2, 1, 0); \ - __ret_131; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdot_laneq_f32(__p0_132, __p1_132, __p2_132, __p3_132) __extension__ ({ \ - float32x2_t __ret_132; \ - float32x2_t __s0_132 = __p0_132; \ - bfloat16x4_t __s1_132 = __p1_132; \ - bfloat16x8_t __s2_132 = __p2_132; \ -bfloat16x8_t __reint_132 = __s2_132; \ -float32x2_t __reint1_132 = splat_laneq_f32(*(float32x4_t *) &__reint_132, __p3_132); \ - __ret_132 = vbfdot_f32(__s0_132, __s1_132, *(bfloat16x4_t *) &__reint1_132); \ - __ret_132; \ -}) -#else -#define vbfdot_laneq_f32(__p0_133, __p1_133, __p2_133, __p3_133) __extension__ ({ \ - float32x2_t __ret_133; \ - float32x2_t __s0_133 = __p0_133; \ - bfloat16x4_t __s1_133 = __p1_133; \ - bfloat16x8_t __s2_133 = __p2_133; \ - float32x2_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 1, 0); \ - bfloat16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_133; __rev2_133 = __builtin_shufflevector(__s2_133, __s2_133, 7, 6, 5, 4, 3, 2, 1, 0); \ -bfloat16x8_t __reint_133 = __rev2_133; \ -float32x2_t __reint1_133 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_133, __p3_133); \ - __ret_133 = __noswap_vbfdot_f32(__rev0_133, __rev1_133, *(bfloat16x4_t *) &__reint1_133); \ - __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0); \ - __ret_133; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#endif - -#define vcreate_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (bfloat16x4_t)(__promote); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_134) { - float32x4_t __ret_134; -bfloat16x4_t __reint_134 = __p0_134; -int32x4_t __reint1_134 = vshll_n_s16(*(int16x4_t *) &__reint_134, 16); - __ret_134 = *(float32x4_t *) &__reint1_134; - return __ret_134; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_135) { - float32x4_t __ret_135; - bfloat16x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__p0_135, __p0_135, 3, 2, 1, 0); -bfloat16x4_t __reint_135 = __rev0_135; -int32x4_t __reint1_135 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_135, 16); - __ret_135 = *(float32x4_t *) &__reint1_135; - __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); - return __ret_135; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_136) { - float32x4_t __ret_136; -bfloat16x4_t __reint_136 = __p0_136; -int32x4_t __reint1_136 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_136, 16); - __ret_136 = *(float32x4_t *) &__reint1_136; - return __ret_136; -} -#endif - -__ai __attribute__((target("bf16"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { - float32_t __ret; -bfloat16_t __reint = __p0; -int32_t __reint1 = *(int32_t *) &__reint << 16; - __ret = *(float32_t *) &__reint1; - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { - bfloat16_t __ret; - __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_bf16(__p0_137, __p1_137) __extension__ ({ \ - bfloat16x8_t __ret_137; \ - bfloat16x4_t __s0_137 = __p0_137; \ - __ret_137 = splatq_lane_bf16(__s0_137, __p1_137); \ - __ret_137; \ -}) -#else -#define vdupq_lane_bf16(__p0_138, __p1_138) __extension__ ({ \ - bfloat16x8_t __ret_138; \ - bfloat16x4_t __s0_138 = __p0_138; \ - bfloat16x4_t __rev0_138; __rev0_138 = __builtin_shufflevector(__s0_138, __s0_138, 3, 2, 1, 0); \ - __ret_138 = __noswap_splatq_lane_bf16(__rev0_138, __p1_138); \ - __ret_138 = __builtin_shufflevector(__ret_138, __ret_138, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_138; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_bf16(__p0_139, __p1_139) __extension__ ({ \ - bfloat16x4_t __ret_139; \ - bfloat16x4_t __s0_139 = __p0_139; \ - __ret_139 = splat_lane_bf16(__s0_139, __p1_139); \ - __ret_139; \ 
-}) -#else -#define vdup_lane_bf16(__p0_140, __p1_140) __extension__ ({ \ - bfloat16x4_t __ret_140; \ - bfloat16x4_t __s0_140 = __p0_140; \ - bfloat16x4_t __rev0_140; __rev0_140 = __builtin_shufflevector(__s0_140, __s0_140, 3, 2, 1, 0); \ - __ret_140 = __noswap_splat_lane_bf16(__rev0_140, __p1_140); \ - __ret_140 = __builtin_shufflevector(__ret_140, __ret_140, 3, 2, 1, 0); \ - __ret_140; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_bf16(__p0_141, __p1_141) __extension__ ({ \ - bfloat16x8_t __ret_141; \ - bfloat16x8_t __s0_141 = __p0_141; \ - __ret_141 = splatq_laneq_bf16(__s0_141, __p1_141); \ - __ret_141; \ -}) -#else -#define vdupq_laneq_bf16(__p0_142, __p1_142) __extension__ ({ \ - bfloat16x8_t __ret_142; \ - bfloat16x8_t __s0_142 = __p0_142; \ - bfloat16x8_t __rev0_142; __rev0_142 = __builtin_shufflevector(__s0_142, __s0_142, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_142 = __noswap_splatq_laneq_bf16(__rev0_142, __p1_142); \ - __ret_142 = __builtin_shufflevector(__ret_142, __ret_142, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_142; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_bf16(__p0_143, __p1_143) __extension__ ({ \ - bfloat16x4_t __ret_143; \ - bfloat16x8_t __s0_143 = __p0_143; \ - __ret_143 = splat_laneq_bf16(__s0_143, __p1_143); \ - __ret_143; \ -}) -#else -#define vdup_laneq_bf16(__p0_144, __p1_144) __extension__ ({ \ - bfloat16x4_t __ret_144; \ - bfloat16x8_t __s0_144 = __p0_144; \ - bfloat16x8_t __rev0_144; __rev0_144 = __builtin_shufflevector(__s0_144, __s0_144, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_144 = __noswap_splat_laneq_bf16(__rev0_144, __p1_144); \ - __ret_144 = __builtin_shufflevector(__ret_144, __ret_144, 3, 2, 1, 0); \ - __ret_144; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { - 
bfloat16x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ - __ret; \ -}) -#else -#define vld1q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ - __ret; \ -}) -#else -#define vld1_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ - __ret; \ -}) -#else -#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ - __ret; \ -}) -#else -#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x2(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16_x2(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x2(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16_x2(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x3(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16_x3(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 
0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x3(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16_x3(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x4(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16_x4(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x4(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16_x4(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld2q_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld2_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld2q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 
3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld2_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ - __ret; \ -}) -#else -#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ - __ret; \ -}) -#else -#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld3q_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld3_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld3q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld3_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ - __ret; \ -}) -#else -#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ - __ret; \ -}) -#else -#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 
0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld4q_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld4_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld4q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld4_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ - __ret; \ -}) -#else -#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \
-})
-#else
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \
-})
-#else
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \
-})
-#else
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \
-})
-#else
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
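The deleted bf16 blocks above all follow one generated pattern: the `#else` (big-endian) branch of each intrinsic reverses the lanes of every input with `__builtin_shufflevector`, calls the same little-endian builtin, then reverses the result back. A minimal stand-alone sketch of that reversal idiom, using a generic clang vector type rather than the NEON types (illustrative only, not part of the header):

```c
/* Illustrative sketch of the lane-reversal idiom used by the big-endian
 * branches above. A generic 4-lane vector stands in for the NEON types. */
typedef float f32x4 __attribute__((vector_size(16)));

static f32x4 reverse_lanes(f32x4 v) {
    /* Constant indices 3,2,1,0 reverse the lane order end-to-end, which is
     * exactly what the __rev0/__rev1 temporaries do before each builtin
     * call, and what is done to __ret after it. */
    return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}
```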
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-})
-#else
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-})
-#else
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-})
-#else
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-})
-#else
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-})
-#else
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-})
-#else
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
-})
-#endif
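For reference while reading the `vst2`/`vst3`/`vst4` deletions above: unlike the `vst1` family, the numbered stores interleave their registers element by element in memory. A scalar model of the two-register case, with the bf16 lanes shown as raw 16-bit patterns (names here are illustrative, not part of the header):

```c
#include <stddef.h>
#include <stdint.h>

/* Scalar model of a vst2-style interleaved store: two 4-lane registers
 * become eight alternating elements in memory. */
static void vst2_model(uint16_t *dst, const uint16_t val[2][4]) {
    for (size_t i = 0; i < 4; i++) {
        dst[2 * i + 0] = val[0][i];  /* lane i of the first register  */
        dst[2 * i + 1] = val[1][i];  /* lane i of the second register */
    }
}
```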
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai __attribute__((target("dotprod"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai __attribute__((target("dotprod"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai __attribute__((target("dotprod"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai __attribute__((target("dotprod"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
-  uint32x4_t __ret_145; \
-  uint32x4_t __s0_145 = __p0_145; \
-  uint8x16_t __s1_145 = __p1_145; \
-  uint8x8_t __s2_145 = __p2_145; \
-uint8x8_t __reint_145 = __s2_145; \
-uint32x4_t __reint1_145 = splatq_lane_u32(*(uint32x2_t *) &__reint_145, __p3_145); \
-  __ret_145 = vdotq_u32(__s0_145, __s1_145, *(uint8x16_t *) &__reint1_145); \
-  __ret_145; \
-})
-#else
-#define vdotq_lane_u32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
-  uint32x4_t __ret_146; \
-  uint32x4_t __s0_146 = __p0_146; \
-  uint8x16_t __s1_146 = __p1_146; \
-  uint8x8_t __s2_146 = __p2_146; \
-  uint32x4_t __rev0_146; __rev0_146 = __builtin_shufflevector(__s0_146, __s0_146, 3, 2, 1, 0); \
-  uint8x16_t __rev1_146; __rev1_146 = __builtin_shufflevector(__s1_146, __s1_146, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_146; __rev2_146 = __builtin_shufflevector(__s2_146, __s2_146, 7, 6, 5, 4, 3, 2, 1, 0); \
-uint8x8_t __reint_146 = __rev2_146; \
-uint32x4_t __reint1_146 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_146, __p3_146); \
-  __ret_146 = __noswap_vdotq_u32(__rev0_146, __rev1_146, *(uint8x16_t *) &__reint1_146); \
-  __ret_146 = __builtin_shufflevector(__ret_146, __ret_146, 3, 2, 1, 0); \
-  __ret_146; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
-  int32x4_t __ret_147; \
-  int32x4_t __s0_147 = __p0_147; \
-  int8x16_t __s1_147 = __p1_147; \
-  int8x8_t __s2_147 = __p2_147; \
-int8x8_t __reint_147 = __s2_147; \
-int32x4_t __reint1_147 = splatq_lane_s32(*(int32x2_t *) &__reint_147, __p3_147); \
-  __ret_147 = vdotq_s32(__s0_147, __s1_147, *(int8x16_t *) &__reint1_147); \
-  __ret_147; \
-})
-#else
-#define vdotq_lane_s32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
-  int32x4_t __ret_148; \
-  int32x4_t __s0_148 = __p0_148; \
-  int8x16_t __s1_148 = __p1_148; \
-  int8x8_t __s2_148 = __p2_148; \
-  int32x4_t __rev0_148; __rev0_148 = __builtin_shufflevector(__s0_148, __s0_148, 3, 2, 1, 0); \
-  int8x16_t __rev1_148; __rev1_148 = __builtin_shufflevector(__s1_148, __s1_148, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_148; __rev2_148 = __builtin_shufflevector(__s2_148, __s2_148, 7, 6, 5, 4, 3, 2, 1, 0); \
-int8x8_t __reint_148 = __rev2_148; \
-int32x4_t __reint1_148 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_148, __p3_148); \
-  __ret_148 = __noswap_vdotq_s32(__rev0_148, __rev1_148, *(int8x16_t *) &__reint1_148); \
-  __ret_148 = __builtin_shufflevector(__ret_148, __ret_148, 3, 2, 1, 0); \
-  __ret_148; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
-  uint32x2_t __ret_149; \
-  uint32x2_t __s0_149 = __p0_149; \
-  uint8x8_t __s1_149 = __p1_149; \
-  uint8x8_t __s2_149 = __p2_149; \
-uint8x8_t __reint_149 = __s2_149; \
-uint32x2_t __reint1_149 = splat_lane_u32(*(uint32x2_t *) &__reint_149, __p3_149); \
-  __ret_149 = vdot_u32(__s0_149, __s1_149, *(uint8x8_t *) &__reint1_149); \
-  __ret_149; \
-})
-#else
-#define vdot_lane_u32(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \
-  uint32x2_t __ret_150; \
-  uint32x2_t __s0_150 = __p0_150; \
-  uint8x8_t __s1_150 = __p1_150; \
-  uint8x8_t __s2_150 = __p2_150; \
-  uint32x2_t __rev0_150; __rev0_150 = __builtin_shufflevector(__s0_150, __s0_150, 1, 0); \
-  uint8x8_t __rev1_150; __rev1_150 = __builtin_shufflevector(__s1_150, __s1_150, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_150; __rev2_150 = __builtin_shufflevector(__s2_150, __s2_150, 7, 6, 5, 4, 3, 2, 1, 0); \
-uint8x8_t __reint_150 = __rev2_150; \
-uint32x2_t __reint1_150 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150); \
-  __ret_150 = __noswap_vdot_u32(__rev0_150, __rev1_150, *(uint8x8_t *) &__reint1_150); \
-  __ret_150 = __builtin_shufflevector(__ret_150, __ret_150, 1, 0); \
-  __ret_150; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \
-  int32x2_t __ret_151; \
-  int32x2_t __s0_151 = __p0_151; \
-  int8x8_t __s1_151 = __p1_151; \
-  int8x8_t __s2_151 = __p2_151; \
-int8x8_t __reint_151 = __s2_151; \
-int32x2_t __reint1_151 = splat_lane_s32(*(int32x2_t *) &__reint_151, __p3_151); \
-  __ret_151 = vdot_s32(__s0_151, __s1_151, *(int8x8_t *) &__reint1_151); \
-  __ret_151; \
-})
-#else
-#define vdot_lane_s32(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \
-  int32x2_t __ret_152; \
-  int32x2_t __s0_152 = __p0_152; \
-  int8x8_t __s1_152 = __p1_152; \
-  int8x8_t __s2_152 = __p2_152; \
-  int32x2_t __rev0_152; __rev0_152 = __builtin_shufflevector(__s0_152, __s0_152, 1, 0); \
-  int8x8_t __rev1_152; __rev1_152 = __builtin_shufflevector(__s1_152, __s1_152, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_152; __rev2_152 = __builtin_shufflevector(__s2_152, __s2_152, 7, 6, 5, 4, 3, 2, 1, 0); \
-int8x8_t __reint_152 = __rev2_152; \
-int32x2_t __reint1_152 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_152, __p3_152); \
-  __ret_152 = __noswap_vdot_s32(__rev0_152, __rev1_152, *(int8x8_t *) &__reint1_152); \
-  __ret_152 = __builtin_shufflevector(__ret_152, __ret_152, 1, 0); \
-  __ret_152; \
-})
-#endif
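The `vdot` family deleted above computes a widening dot product: each 32-bit accumulator lane absorbs the products of four adjacent 8-bit elements, and the `_lane` variants first broadcast one 32-bit group of the third operand via `splat(q)_lane` before the same computation. A scalar model of the `vdotq_s32` semantics (illustrative only):

```c
#include <stdint.h>

/* Scalar model of vdotq_s32: accumulator lane i gains the dot product of
 * elements 4i..4i+3 of the two int8 vectors. */
static void vdotq_s32_model(int32_t acc[4], const int8_t a[16], const int8_t b[16]) {
    for (int lane = 0; lane < 4; lane++) {
        int32_t sum = 0;
        for (int j = 0; j < 4; j++)
            sum += (int32_t)a[4 * lane + j] * (int32_t)b[4 * lane + j];
        acc[lane] += sum;
    }
}
```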
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
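All of the fullfp16 comparisons above return `uint16x8_t`/`uint16x4_t` lane masks, all-ones for true and all-zeros for false, which is what makes them composable with the `vbsl` bit-select seen earlier in this hunk. A per-lane scalar model (plain `float` stands in for the `__fp16` lane values; illustrative only):

```c
#include <stdint.h>

/* Per-lane model: a true comparison yields an all-ones 16-bit mask. */
static uint16_t vceq_lane_model(float a, float b) {
    return (a == b) ? 0xFFFFu : 0x0000u;
}

/* vbsl picks bits from t where the mask bit is 1, from f where it is 0. */
static uint16_t vbsl_lane_model(uint16_t mask, uint16_t t, uint16_t f) {
    return (uint16_t)((t & mask) | (f & (uint16_t)~mask));
}
```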
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  float16x8_t __ret; \
-  uint16x8_t __s0 = __p0; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  float16x8_t __ret; \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  float16x8_t __ret; \
-  int16x8_t __s0 = __p0; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  float16x8_t __ret; \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  float16x4_t __ret; \
-  uint16x4_t __s0 = __p0; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  float16x4_t __ret; \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  float16x4_t __ret; \
-  int16x4_t __s0 = __p0; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  float16x4_t __ret; \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  int16x8_t __ret; \
-  float16x8_t __s0 = __p0; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  int16x8_t __ret; \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  int16x4_t __ret; \
-  float16x4_t __s0 = __p0; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  int16x4_t __ret; \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __ret; \
-  float16x8_t __s0 = __p0; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __ret; \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __ret; \
-  float16x4_t __s0 = __p0; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __ret; \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
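The `_n_` conversion macros above take an immediate fractional-bit count: converting to float divides by 2^n and converting back multiplies by 2^n, i.e. the integer side is treated as fixed-point. A scalar sketch of that scaling (saturation and rounding details of the real instructions are deliberately omitted; names are illustrative):

```c
#include <stdint.h>

/* Fixed-point model of the _n_ conversions, with n fractional bits. */
static float cvt_n_float_from_s16(int16_t x, int n) {
    return (float)x / (float)(1 << n);     /* vcvt_n_f16_s16 analogue */
}

static int16_t cvt_n_s16_from_float(float x, int n) {
    return (int16_t)(x * (float)(1 << n)); /* vcvt_n_s16_f16 analogue, no saturation */
}
```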
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
__attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ - __ret; \ -}) -#else -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ - __ret; \ -}) -#else -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, 
(int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = vfmaq_f16(__p0, -__p1, __p2); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = vfma_f16(__p0, -__p1, __p2); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f16(__p0_153, __p1_153, __p2_153) __extension__ ({ \ - float16x8_t __ret_153; \ - float16x8_t __s0_153 = __p0_153; \ - float16x4_t __s1_153 = __p1_153; \ - __ret_153 = __s0_153 * splatq_lane_f16(__s1_153, __p2_153); \ - __ret_153; \ -}) -#else -#define vmulq_lane_f16(__p0_154, __p1_154, __p2_154) __extension__ ({ \ - float16x8_t __ret_154; \ - float16x8_t __s0_154 = __p0_154; \ - float16x4_t __s1_154 = __p1_154; \ - float16x8_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1_154; __rev1_154 = __builtin_shufflevector(__s1_154, __s1_154, 3, 2, 1, 0); \ - __ret_154 = __rev0_154 * __noswap_splatq_lane_f16(__rev1_154, __p2_154); \ - __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_154; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f16(__p0_155, __p1_155, __p2_155) __extension__ ({ \ - float16x4_t __ret_155; \ - float16x4_t __s0_155 = __p0_155; \ - float16x4_t __s1_155 = __p1_155; \ - __ret_155 = __s0_155 * splat_lane_f16(__s1_155, __p2_155); \ - __ret_155; \ -}) -#else -#define vmul_lane_f16(__p0_156, __p1_156, __p2_156) __extension__ ({ \ - float16x4_t __ret_156; \ - float16x4_t __s0_156 = __p0_156; \ - float16x4_t __s1_156 = __p1_156; \ - float16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \ - float16x4_t __rev1_156; __rev1_156 = __builtin_shufflevector(__s1_156, __s1_156, 3, 2, 1, 0); \ - __ret_156 = __rev0_156 * __noswap_splat_lane_f16(__rev1_156, __p2_156); \ - __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \ - __ret_156; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ - __ret; \ -}) -#else -#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ - __ret; \ -}) -#else -#define vmul_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; 
- __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = 
(float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) 
float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
+__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
 float16x8x2_t __ret;
- __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
 return __ret;
 }
 #else
-__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
+__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
 float16x8x2_t __ret;
 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+ __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
 
 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
@@ -35448,17 +35327,17 @@ __ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
+__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
 float16x4x2_t __ret;
- __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
 return __ret;
 }
 #else
-__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
+__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
 float16x4x2_t __ret;
 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+ __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
 
 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
@@ -35467,241 +35346,13 @@ __ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8x2_t __ret;
- __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
-}
-#else
-__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8x2_t __ret;
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4x2_t __ret;
- __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
-}
-#else
-__ai
__attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vzip_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzip_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - 
int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("i8mm"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#else -__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("i8mm"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdotq_lane_s32(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ - int32x4_t __ret_157; \ - int32x4_t __s0_157 = __p0_157; \ - uint8x16_t __s1_157 = __p1_157; \ - int8x8_t __s2_157 = __p2_157; \ -int8x8_t __reint_157 = __s2_157; \ - __ret_157 = vusdotq_s32(__s0_157, __s1_157, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_157, __p3_157))); \ - __ret_157; \ -}) -#else -#define vusdotq_lane_s32(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ - int32x4_t __ret_158; \ - int32x4_t __s0_158 = __p0_158; \ - uint8x16_t __s1_158 = __p1_158; \ - int8x8_t __s2_158 = __p2_158; \ - int32x4_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 
3, 2, 1, 0); \ - uint8x16_t __rev1_158; __rev1_158 = __builtin_shufflevector(__s1_158, __s1_158, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_158; __rev2_158 = __builtin_shufflevector(__s2_158, __s2_158, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_158 = __rev2_158; \ - __ret_158 = __noswap_vusdotq_s32(__rev0_158, __rev1_158, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_158, __p3_158))); \ - __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 3, 2, 1, 0); \ - __ret_158; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdot_lane_s32(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ - int32x2_t __ret_159; \ - int32x2_t __s0_159 = __p0_159; \ - uint8x8_t __s1_159 = __p1_159; \ - int8x8_t __s2_159 = __p2_159; \ -int8x8_t __reint_159 = __s2_159; \ - __ret_159 = vusdot_s32(__s0_159, __s1_159, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_159, __p3_159))); \ - __ret_159; \ -}) -#else -#define vusdot_lane_s32(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ - int32x2_t __ret_160; \ - int32x2_t __s0_160 = __p0_160; \ - uint8x8_t __s1_160 = __p1_160; \ - int8x8_t __s2_160 = __p2_160; \ - int32x2_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 1, 0); \ - uint8x8_t __rev1_160; __rev1_160 = __builtin_shufflevector(__s1_160, __s1_160, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_160; __rev2_160 = __builtin_shufflevector(__s2_160, __s2_160, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_160 = __rev2_160; \ - __ret_160 = __noswap_vusdot_s32(__rev0_160, __rev1_160, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_160, __p3_160))); \ - __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 1, 0); \ - __ret_160; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -35710,7 +35361,7 @@ __ai __attribute__((target("v8.1a"))) 
int32x4_t vqrdmlahq_s32(int32x4_t __p0, in
 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
 return __ret;
 }
-__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
 int32x4_t __ret;
 __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
 return __ret;
@@ -35718,13 +35369,13 @@ __ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
 int16x8_t __ret;
 __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
 return __ret;
 }
 #else
-__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
 int16x8_t __ret;
 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -35733,7 +35384,7 @@ __ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, in
 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
 return __ret;
 }
-__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
 int16x8_t __ret;
 __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
 return __ret;
@@ -35741,13 +35392,13 @@ __ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 int32x2_t __ret;
 __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
 return __ret;
 }
 #else
-__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 int32x2_t __ret;
 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -35756,7 +35407,7 @@ __ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int
 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
 return __ret;
 }
-__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 int32x2_t __ret;
 __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
 return __ret;
@@ -35764,13 +35415,13 @@ __ai __attribute__((target("v8.1a"))) int32x2_t
__noswap_vqrdmlah_s32(int32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -35779,7 +35430,7 @@ __ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; @@ -35787,109 +35438,109 @@ __ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s32(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ - int32x4_t __ret_161; \ - int32x4_t __s0_161 = __p0_161; \ - int32x4_t __s1_161 = __p1_161; \ - int32x2_t __s2_161 = __p2_161; \ - __ret_161 = vqrdmlahq_s32(__s0_161, __s1_161, splatq_lane_s32(__s2_161, __p3_161)); \ - __ret_161; \ +#define vqrdmlahq_lane_s32(__p0_134, __p1_134, __p2_134, __p3_134) __extension__ ({ \ + int32x4_t __ret_134; \ + int32x4_t __s0_134 = __p0_134; \ + int32x4_t __s1_134 = __p1_134; \ + int32x2_t __s2_134 = __p2_134; \ + __ret_134 = vqrdmlahq_s32(__s0_134, __s1_134, splatq_lane_s32(__s2_134, __p3_134)); \ + __ret_134; \ }) #else -#define vqrdmlahq_lane_s32(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ - int32x4_t __ret_162; \ - int32x4_t __s0_162 = __p0_162; \ - int32x4_t __s1_162 = __p1_162; \ - int32x2_t __s2_162 = __p2_162; \ - int32x4_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 3, 2, 1, 0); \ - int32x4_t __rev1_162; __rev1_162 = __builtin_shufflevector(__s1_162, __s1_162, 3, 2, 1, 0); \ - int32x2_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 1, 0); \ - __ret_162 = __noswap_vqrdmlahq_s32(__rev0_162, __rev1_162, __noswap_splatq_lane_s32(__rev2_162, __p3_162)); \ - __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 3, 2, 1, 0); \ - __ret_162; \ +#define vqrdmlahq_lane_s32(__p0_135, __p1_135, __p2_135, __p3_135) __extension__ ({ \ + int32x4_t __ret_135; \ + int32x4_t __s0_135 = __p0_135; \ + int32x4_t __s1_135 = __p1_135; \ + int32x2_t __s2_135 = __p2_135; \ + int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \ + int32x4_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 3, 2, 1, 0); \ + int32x2_t __rev2_135; __rev2_135 = __builtin_shufflevector(__s2_135, __s2_135, 1, 0); \ + __ret_135 = __noswap_vqrdmlahq_s32(__rev0_135, __rev1_135, __noswap_splatq_lane_s32(__rev2_135, __p3_135)); \ + __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 
1, 0); \ + __ret_135; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ - int16x8_t __ret_163; \ - int16x8_t __s0_163 = __p0_163; \ - int16x8_t __s1_163 = __p1_163; \ - int16x4_t __s2_163 = __p2_163; \ - __ret_163 = vqrdmlahq_s16(__s0_163, __s1_163, splatq_lane_s16(__s2_163, __p3_163)); \ - __ret_163; \ +#define vqrdmlahq_lane_s16(__p0_136, __p1_136, __p2_136, __p3_136) __extension__ ({ \ + int16x8_t __ret_136; \ + int16x8_t __s0_136 = __p0_136; \ + int16x8_t __s1_136 = __p1_136; \ + int16x4_t __s2_136 = __p2_136; \ + __ret_136 = vqrdmlahq_s16(__s0_136, __s1_136, splatq_lane_s16(__s2_136, __p3_136)); \ + __ret_136; \ }) #else -#define vqrdmlahq_lane_s16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ - int16x8_t __ret_164; \ - int16x8_t __s0_164 = __p0_164; \ - int16x8_t __s1_164 = __p1_164; \ - int16x4_t __s2_164 = __p2_164; \ - int16x8_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_164; __rev1_164 = __builtin_shufflevector(__s1_164, __s1_164, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ - __ret_164 = __noswap_vqrdmlahq_s16(__rev0_164, __rev1_164, __noswap_splatq_lane_s16(__rev2_164, __p3_164)); \ - __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_164; \ +#define vqrdmlahq_lane_s16(__p0_137, __p1_137, __p2_137, __p3_137) __extension__ ({ \ + int16x8_t __ret_137; \ + int16x8_t __s0_137 = __p0_137; \ + int16x8_t __s1_137 = __p1_137; \ + int16x4_t __s2_137 = __p2_137; \ + int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_137; __rev2_137 = __builtin_shufflevector(__s2_137, __s2_137, 3, 2, 1, 0); \ + __ret_137 = __noswap_vqrdmlahq_s16(__rev0_137, __rev1_137, __noswap_splatq_lane_s16(__rev2_137, __p3_137)); \ + __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_137; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s32(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ - int32x2_t __ret_165; \ - int32x2_t __s0_165 = __p0_165; \ - int32x2_t __s1_165 = __p1_165; \ - int32x2_t __s2_165 = __p2_165; \ - __ret_165 = vqrdmlah_s32(__s0_165, __s1_165, splat_lane_s32(__s2_165, __p3_165)); \ - __ret_165; \ +#define vqrdmlah_lane_s32(__p0_138, __p1_138, __p2_138, __p3_138) __extension__ ({ \ + int32x2_t __ret_138; \ + int32x2_t __s0_138 = __p0_138; \ + int32x2_t __s1_138 = __p1_138; \ + int32x2_t __s2_138 = __p2_138; \ + __ret_138 = vqrdmlah_s32(__s0_138, __s1_138, splat_lane_s32(__s2_138, __p3_138)); \ + __ret_138; \ }) #else -#define vqrdmlah_lane_s32(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ - int32x2_t __ret_166; \ - int32x2_t __s0_166 = __p0_166; \ - int32x2_t __s1_166 = __p1_166; \ - int32x2_t __s2_166 = __p2_166; \ - int32x2_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 1, 0); \ - int32x2_t __rev1_166; __rev1_166 = __builtin_shufflevector(__s1_166, __s1_166, 1, 0); \ - int32x2_t __rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 1, 0); \ - __ret_166 = __noswap_vqrdmlah_s32(__rev0_166, __rev1_166, __noswap_splat_lane_s32(__rev2_166, __p3_166)); \ - __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 1, 0); \ - __ret_166; \ 
+#define vqrdmlah_lane_s32(__p0_139, __p1_139, __p2_139, __p3_139) __extension__ ({ \ + int32x2_t __ret_139; \ + int32x2_t __s0_139 = __p0_139; \ + int32x2_t __s1_139 = __p1_139; \ + int32x2_t __s2_139 = __p2_139; \ + int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \ + int32x2_t __rev1_139; __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \ + int32x2_t __rev2_139; __rev2_139 = __builtin_shufflevector(__s2_139, __s2_139, 1, 0); \ + __ret_139 = __noswap_vqrdmlah_s32(__rev0_139, __rev1_139, __noswap_splat_lane_s32(__rev2_139, __p3_139)); \ + __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \ + __ret_139; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ - int16x4_t __ret_167; \ - int16x4_t __s0_167 = __p0_167; \ - int16x4_t __s1_167 = __p1_167; \ - int16x4_t __s2_167 = __p2_167; \ - __ret_167 = vqrdmlah_s16(__s0_167, __s1_167, splat_lane_s16(__s2_167, __p3_167)); \ - __ret_167; \ +#define vqrdmlah_lane_s16(__p0_140, __p1_140, __p2_140, __p3_140) __extension__ ({ \ + int16x4_t __ret_140; \ + int16x4_t __s0_140 = __p0_140; \ + int16x4_t __s1_140 = __p1_140; \ + int16x4_t __s2_140 = __p2_140; \ + __ret_140 = vqrdmlah_s16(__s0_140, __s1_140, splat_lane_s16(__s2_140, __p3_140)); \ + __ret_140; \ }) #else -#define vqrdmlah_lane_s16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ - int16x4_t __ret_168; \ - int16x4_t __s0_168 = __p0_168; \ - int16x4_t __s1_168 = __p1_168; \ - int16x4_t __s2_168 = __p2_168; \ - int16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ - int16x4_t __rev1_168; __rev1_168 = __builtin_shufflevector(__s1_168, __s1_168, 3, 2, 1, 0); \ - int16x4_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 3, 2, 1, 0); \ - __ret_168 = __noswap_vqrdmlah_s16(__rev0_168, __rev1_168, __noswap_splat_lane_s16(__rev2_168, __p3_168)); \ - __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ - __ret_168; \ +#define vqrdmlah_lane_s16(__p0_141, __p1_141, __p2_141, __p3_141) __extension__ ({ \ + int16x4_t __ret_141; \ + int16x4_t __s0_141 = __p0_141; \ + int16x4_t __s1_141 = __p1_141; \ + int16x4_t __s2_141 = __p2_141; \ + int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \ + int16x4_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \ + int16x4_t __rev2_141; __rev2_141 = __builtin_shufflevector(__s2_141, __s2_141, 3, 2, 1, 0); \ + __ret_141 = __noswap_vqrdmlah_s16(__rev0_141, __rev1_141, __noswap_splat_lane_s16(__rev2_141, __p3_141)); \ + __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \ + __ret_141; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -35898,7 +35549,7 @@ __ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, in __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; @@ -35906,13 +35557,13 @@ __ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -35921,7 +35572,7 @@ __ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, in __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; @@ -35929,13 +35580,13 @@ __ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -35944,7 +35595,7 @@ __ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, 
(int8x8_t)__p2, 2); return __ret; @@ -35952,13 +35603,13 @@ __ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -35967,7 +35618,7 @@ __ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; @@ -35975,809 +35626,109 @@ __ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ - int32x4_t __ret_169; \ - int32x4_t __s0_169 = __p0_169; \ - int32x4_t __s1_169 = __p1_169; \ - int32x2_t __s2_169 = __p2_169; \ - __ret_169 = vqrdmlshq_s32(__s0_169, __s1_169, splatq_lane_s32(__s2_169, __p3_169)); \ - __ret_169; \ +#define vqrdmlshq_lane_s32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \ + int32x4_t __ret_142; \ + int32x4_t __s0_142 = __p0_142; \ + int32x4_t __s1_142 = __p1_142; \ + int32x2_t __s2_142 = __p2_142; \ + __ret_142 = vqrdmlshq_s32(__s0_142, __s1_142, splatq_lane_s32(__s2_142, __p3_142)); \ + __ret_142; \ }) #else -#define vqrdmlshq_lane_s32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ - int32x4_t __ret_170; \ - int32x4_t __s0_170 = __p0_170; \ - int32x4_t __s1_170 = __p1_170; \ - int32x2_t __s2_170 = __p2_170; \ - int32x4_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \ - int32x4_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 3, 2, 1, 0); \ - int32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ - __ret_170 = __noswap_vqrdmlshq_s32(__rev0_170, __rev1_170, __noswap_splatq_lane_s32(__rev2_170, __p3_170)); \ - __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 3, 2, 1, 0); \ - __ret_170; \ +#define vqrdmlshq_lane_s32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \ + int32x4_t __ret_143; \ + int32x4_t __s0_143 = __p0_143; \ + int32x4_t __s1_143 = __p1_143; \ + int32x2_t __s2_143 = __p2_143; \ + int32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \ + int32x4_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 3, 2, 1, 0); \ + int32x2_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 1, 0); \ + __ret_143 = __noswap_vqrdmlshq_s32(__rev0_143, __rev1_143, 
__noswap_splatq_lane_s32(__rev2_143, __p3_143)); \ + __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \ + __ret_143; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ - int16x8_t __ret_171; \ - int16x8_t __s0_171 = __p0_171; \ - int16x8_t __s1_171 = __p1_171; \ - int16x4_t __s2_171 = __p2_171; \ - __ret_171 = vqrdmlshq_s16(__s0_171, __s1_171, splatq_lane_s16(__s2_171, __p3_171)); \ - __ret_171; \ +#define vqrdmlshq_lane_s16(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \ + int16x8_t __ret_144; \ + int16x8_t __s0_144 = __p0_144; \ + int16x8_t __s1_144 = __p1_144; \ + int16x4_t __s2_144 = __p2_144; \ + __ret_144 = vqrdmlshq_s16(__s0_144, __s1_144, splatq_lane_s16(__s2_144, __p3_144)); \ + __ret_144; \ }) #else -#define vqrdmlshq_lane_s16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ - int16x8_t __ret_172; \ - int16x8_t __s0_172 = __p0_172; \ - int16x8_t __s1_172 = __p1_172; \ - int16x4_t __s2_172 = __p2_172; \ - int16x8_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 3, 2, 1, 0); \ - __ret_172 = __noswap_vqrdmlshq_s16(__rev0_172, __rev1_172, __noswap_splatq_lane_s16(__rev2_172, __p3_172)); \ - __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_172; \ +#define vqrdmlshq_lane_s16(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ + int16x8_t __ret_145; \ + int16x8_t __s0_145 = __p0_145; \ + int16x8_t __s1_145 = __p1_145; \ + int16x4_t __s2_145 = __p2_145; \ + int16x8_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \ + __ret_145 = __noswap_vqrdmlshq_s16(__rev0_145, __rev1_145, __noswap_splatq_lane_s16(__rev2_145, __p3_145)); \ + __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_145; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ - int32x2_t __ret_173; \ - int32x2_t __s0_173 = __p0_173; \ - int32x2_t __s1_173 = __p1_173; \ - int32x2_t __s2_173 = __p2_173; \ - __ret_173 = vqrdmlsh_s32(__s0_173, __s1_173, splat_lane_s32(__s2_173, __p3_173)); \ - __ret_173; \ +#define vqrdmlsh_lane_s32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ + int32x2_t __ret_146; \ + int32x2_t __s0_146 = __p0_146; \ + int32x2_t __s1_146 = __p1_146; \ + int32x2_t __s2_146 = __p2_146; \ + __ret_146 = vqrdmlsh_s32(__s0_146, __s1_146, splat_lane_s32(__s2_146, __p3_146)); \ + __ret_146; \ }) #else -#define vqrdmlsh_lane_s32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ - int32x2_t __ret_174; \ - int32x2_t __s0_174 = __p0_174; \ - int32x2_t __s1_174 = __p1_174; \ - int32x2_t __s2_174 = __p2_174; \ - int32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ - int32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ - int32x2_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 1, 0); \ - __ret_174 = __noswap_vqrdmlsh_s32(__rev0_174, __rev1_174, 
__noswap_splat_lane_s32(__rev2_174, __p3_174)); \ - __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \ - __ret_174; \ +#define vqrdmlsh_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ + int32x2_t __ret_147; \ + int32x2_t __s0_147 = __p0_147; \ + int32x2_t __s1_147 = __p1_147; \ + int32x2_t __s2_147 = __p2_147; \ + int32x2_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 1, 0); \ + int32x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \ + int32x2_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 1, 0); \ + __ret_147 = __noswap_vqrdmlsh_s32(__rev0_147, __rev1_147, __noswap_splat_lane_s32(__rev2_147, __p3_147)); \ + __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 1, 0); \ + __ret_147; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ - int16x4_t __ret_175; \ - int16x4_t __s0_175 = __p0_175; \ - int16x4_t __s1_175 = __p1_175; \ - int16x4_t __s2_175 = __p2_175; \ - __ret_175 = vqrdmlsh_s16(__s0_175, __s1_175, splat_lane_s16(__s2_175, __p3_175)); \ - __ret_175; \ +#define vqrdmlsh_lane_s16(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ + int16x4_t __ret_148; \ + int16x4_t __s0_148 = __p0_148; \ + int16x4_t __s1_148 = __p1_148; \ + int16x4_t __s2_148 = __p2_148; \ + __ret_148 = vqrdmlsh_s16(__s0_148, __s1_148, splat_lane_s16(__s2_148, __p3_148)); \ + __ret_148; \ }) #else -#define vqrdmlsh_lane_s16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ - int16x4_t __ret_176; \ - int16x4_t __s0_176 = __p0_176; \ - int16x4_t __s1_176 = __p1_176; \ - int16x4_t __s2_176 = __p2_176; \ - int16x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ - int16x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ - int16x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ - __ret_176 = __noswap_vqrdmlsh_s16(__rev0_176, __rev1_176, __noswap_splat_lane_s16(__rev2_176, __p3_176)); \ - __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ - __ret_176; \ +#define vqrdmlsh_lane_s16(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ + int16x4_t __ret_149; \ + int16x4_t __s0_149 = __p0_149; \ + int16x4_t __s1_149 = __p1_149; \ + int16x4_t __s2_149 = __p2_149; \ + int16x4_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 3, 2, 1, 0); \ + int16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \ + int16x4_t __rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 3, 2, 1, 0); \ + __ret_149 = __noswap_vqrdmlsh_s16(__rev0_149, __rev1_149, __noswap_splat_lane_s16(__rev2_149, __p3_149)); \ + __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 3, 2, 1, 0); \ + __ret_149; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - 
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t 
vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ - float32x2_t __ret_177; \ - float32x2_t __s0_177 = __p0_177; \ - float32x2_t __s1_177 = __p1_177; \ - float32x2_t __s2_177 = __p2_177; \ -float32x2_t __reint_177 = __s2_177; \ -uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ - __ret_177 = vcmla_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \ - __ret_177; \ -}) -#else -#define vcmla_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ - float32x2_t __ret_178; \ - float32x2_t __s0_178 = __p0_178; \ - float32x2_t __s1_178 = __p1_178; \ - float32x2_t __s2_178 = __p2_178; \ - float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ - float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ - float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ -float32x2_t __reint_178 = __rev2_178; \ -uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ - __ret_178 = __noswap_vcmla_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ - __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ - __ret_178; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ - float32x4_t __ret_179; \ - float32x4_t __s0_179 = __p0_179; \ - float32x4_t __s1_179 = __p1_179; \ - float32x2_t __s2_179 = __p2_179; \ -float32x2_t __reint_179 = __s2_179; \ -uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ - __ret_179 = vcmlaq_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ - __ret_179; \ -}) -#else -#define vcmlaq_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ - float32x4_t __ret_180; \ - float32x4_t __s0_180 = __p0_180; \ - float32x4_t __s1_180 = __p1_180; \ - float32x2_t __s2_180 = __p2_180; \ - float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ - float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ - float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ -float32x2_t __reint_180 = __rev2_180; \ -uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), 
vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ - __ret_180 = __noswap_vcmlaq_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ - __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ - __ret_180; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ - float32x2_t __ret_181; \ - float32x2_t __s0_181 = __p0_181; \ - float32x2_t __s1_181 = __p1_181; \ - float32x4_t __s2_181 = __p2_181; \ -float32x4_t __reint_181 = __s2_181; \ -uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ - __ret_181 = vcmla_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ - __ret_181; \ -}) -#else -#define vcmla_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ - float32x2_t __ret_182; \ - float32x2_t __s0_182 = __p0_182; \ - float32x2_t __s1_182 = __p1_182; \ - float32x4_t __s2_182 = __p2_182; \ - float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ - float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ - float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ -float32x4_t __reint_182 = __rev2_182; \ -uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ - __ret_182 = __noswap_vcmla_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ - __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ - __ret_182; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ - float32x4_t __ret_183; \ - float32x4_t __s0_183 = __p0_183; \ - float32x4_t __s1_183 = __p1_183; \ - float32x4_t __s2_183 = __p2_183; \ -float32x4_t __reint_183 = __s2_183; \ -uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ - __ret_183 = vcmlaq_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ - __ret_183; \ -}) -#else -#define vcmlaq_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ - float32x4_t __ret_184; \ - float32x4_t __s0_184 = __p0_184; \ - float32x4_t __s1_184 = __p1_184; \ - float32x4_t __s2_184 = __p2_184; \ - float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ - float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ - float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ -float32x4_t __reint_184 = __rev2_184; \ -uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ - __ret_184 = __noswap_vcmlaq_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ - __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ - __ret_184; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ - float32x2_t __ret_185; \ - float32x2_t __s0_185 = __p0_185; \ - float32x2_t __s1_185 = __p1_185; \ - float32x2_t __s2_185 = __p2_185; \ -float32x2_t __reint_185 = __s2_185; \ -uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ - __ret_185 = vcmla_rot180_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ - __ret_185; \ -}) -#else -#define vcmla_rot180_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ - float32x2_t __ret_186; \ - float32x2_t __s0_186 = __p0_186; \ - float32x2_t __s1_186 = __p1_186; \ - float32x2_t __s2_186 = __p2_186; \ - float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \ - float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ - float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ -float32x2_t __reint_186 = __rev2_186; \ -uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ - __ret_186 = __noswap_vcmla_rot180_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \ - __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ - __ret_186; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ - float32x4_t __ret_187; \ - float32x4_t __s0_187 = __p0_187; \ - float32x4_t __s1_187 = __p1_187; \ - float32x2_t __s2_187 = __p2_187; \ -float32x2_t __reint_187 = __s2_187; \ -uint64x2_t __reint1_187 = (uint64x2_t) 
{vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ - __ret_187 = vcmlaq_rot180_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ - __ret_187; \ -}) -#else -#define vcmlaq_rot180_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ - float32x4_t __ret_188; \ - float32x4_t __s0_188 = __p0_188; \ - float32x4_t __s1_188 = __p1_188; \ - float32x2_t __s2_188 = __p2_188; \ - float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ - float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ - float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ -float32x2_t __reint_188 = __rev2_188; \ -uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ - __ret_188 = __noswap_vcmlaq_rot180_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \ - __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ - __ret_188; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ - float32x2_t __ret_189; \ - float32x2_t __s0_189 = __p0_189; \ - float32x2_t __s1_189 = __p1_189; \ - float32x4_t __s2_189 = __p2_189; \ -float32x4_t __reint_189 = __s2_189; \ -uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ - __ret_189 = vcmla_rot180_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \ - __ret_189; \ -}) -#else -#define vcmla_rot180_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ - float32x2_t __ret_190; \ - float32x2_t __s0_190 = __p0_190; \ - float32x2_t __s1_190 = __p1_190; \ - float32x4_t __s2_190 = __p2_190; \ - float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ - float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ - float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ -float32x4_t __reint_190 = __rev2_190; \ -uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ - __ret_190 = __noswap_vcmla_rot180_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ - __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ - __ret_190; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ - float32x4_t __ret_191; \ - float32x4_t __s0_191 = __p0_191; \ - float32x4_t __s1_191 = __p1_191; \ - float32x4_t __s2_191 = __p2_191; \ -float32x4_t __reint_191 = __s2_191; \ -uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ - __ret_191 = vcmlaq_rot180_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ - __ret_191; \ -}) -#else -#define vcmlaq_rot180_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ - float32x4_t __ret_192; \ - float32x4_t __s0_192 = __p0_192; \ - float32x4_t __s1_192 = __p1_192; \ - float32x4_t __s2_192 = __p2_192; \ - float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ - float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ - float32x4_t __rev2_192; __rev2_192 = 
__builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ -float32x4_t __reint_192 = __rev2_192; \ -uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ - __ret_192 = __noswap_vcmlaq_rot180_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ - __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ - __ret_192; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ - float32x2_t __ret_193; \ - float32x2_t __s0_193 = __p0_193; \ - float32x2_t __s1_193 = __p1_193; \ - float32x2_t __s2_193 = __p2_193; \ -float32x2_t __reint_193 = __s2_193; \ -uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ - __ret_193 = vcmla_rot270_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ - __ret_193; \ -}) -#else -#define vcmla_rot270_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ - float32x2_t __ret_194; \ - float32x2_t __s0_194 = __p0_194; \ - float32x2_t __s1_194 = __p1_194; \ - float32x2_t __s2_194 = __p2_194; \ 
- float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ - float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ - float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ -float32x2_t __reint_194 = __rev2_194; \ -uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ - __ret_194 = __noswap_vcmla_rot270_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ - __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ - __ret_194; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ - float32x4_t __ret_195; \ - float32x4_t __s0_195 = __p0_195; \ - float32x4_t __s1_195 = __p1_195; \ - float32x2_t __s2_195 = __p2_195; \ -float32x2_t __reint_195 = __s2_195; \ -uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ - __ret_195 = vcmlaq_rot270_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ - __ret_195; \ -}) -#else -#define vcmlaq_rot270_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ - float32x4_t __ret_196; \ - float32x4_t __s0_196 = __p0_196; \ - float32x4_t __s1_196 = __p1_196; \ - float32x2_t __s2_196 = __p2_196; \ - float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ - float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ - float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ -float32x2_t __reint_196 = __rev2_196; \ -uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \ - __ret_196 = __noswap_vcmlaq_rot270_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \ - __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \ - __ret_196; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ - float32x2_t __ret_197; \ - float32x2_t __s0_197 = __p0_197; \ - float32x2_t __s1_197 = __p1_197; \ - float32x4_t __s2_197 = __p2_197; \ -float32x4_t __reint_197 = __s2_197; \ -uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ - __ret_197 = vcmla_rot270_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \ - __ret_197; \ -}) -#else -#define vcmla_rot270_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ - float32x2_t __ret_198; \ - float32x2_t __s0_198 = __p0_198; \ - float32x2_t __s1_198 = __p1_198; \ - float32x4_t __s2_198 = __p2_198; \ - float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \ - float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \ - float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \ -float32x4_t __reint_198 = __rev2_198; \ -uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \ - __ret_198 = __noswap_vcmla_rot270_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \ - __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \ - __ret_198; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) 
__extension__ ({ \ - float32x4_t __ret_199; \ - float32x4_t __s0_199 = __p0_199; \ - float32x4_t __s1_199 = __p1_199; \ - float32x4_t __s2_199 = __p2_199; \ -float32x4_t __reint_199 = __s2_199; \ -uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \ - __ret_199 = vcmlaq_rot270_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \ - __ret_199; \ -}) -#else -#define vcmlaq_rot270_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ - float32x4_t __ret_200; \ - float32x4_t __s0_200 = __p0_200; \ - float32x4_t __s1_200 = __p1_200; \ - float32x4_t __s2_200 = __p2_200; \ - float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \ - float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \ - float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \ -float32x4_t __reint_200 = __rev2_200; \ -uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \ - __ret_200 = __noswap_vcmlaq_rot270_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \ - __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \ - __ret_200; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, 
float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ - float32x2_t __ret_201; \ - float32x2_t __s0_201 = __p0_201; \ - float32x2_t __s1_201 = __p1_201; \ - float32x2_t __s2_201 = __p2_201; \ -float32x2_t __reint_201 = __s2_201; \ -uint64x1_t __reint1_201 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ - __ret_201 = vcmla_rot90_f32(__s0_201, __s1_201, *(float32x2_t *) &__reint1_201); \ - __ret_201; \ -}) -#else -#define vcmla_rot90_lane_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ - float32x2_t __ret_202; \ - float32x2_t __s0_202 = __p0_202; \ - float32x2_t __s1_202 = __p1_202; \ - float32x2_t __s2_202 = __p2_202; \ - float32x2_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 1, 0); \ - float32x2_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 1, 0); \ - float32x2_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 1, 0); \ -float32x2_t __reint_202 = __rev2_202; \ -uint64x1_t __reint1_202 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_202, __p3_202)}; \ - __ret_202 = __noswap_vcmla_rot90_f32(__rev0_202, __rev1_202, *(float32x2_t *) &__reint1_202); \ - __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 1, 0); \ - __ret_202; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ - float32x4_t __ret_203; \ - float32x4_t __s0_203 = __p0_203; \ - float32x4_t __s1_203 = __p1_203; \ - float32x2_t __s2_203 = __p2_203; \ -float32x2_t __reint_203 = __s2_203; \ -uint64x2_t __reint1_203 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203), vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203)}; \ - __ret_203 = vcmlaq_rot90_f32(__s0_203, __s1_203, *(float32x4_t *) &__reint1_203); \ - __ret_203; \ -}) -#else -#define vcmlaq_rot90_lane_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ - float32x4_t __ret_204; \ - float32x4_t __s0_204 = __p0_204; \ - float32x4_t __s1_204 = __p1_204; \ - float32x2_t __s2_204 = __p2_204; \ - float32x4_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \ - float32x4_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 3, 2, 1, 0); \ - float32x2_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 1, 0); \ -float32x2_t __reint_204 = __rev2_204; \ -uint64x2_t __reint1_204 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204), vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204)}; \ - __ret_204 = __noswap_vcmlaq_rot90_f32(__rev0_204, __rev1_204, *(float32x4_t *) &__reint1_204); \ - __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \ - __ret_204; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ - float32x2_t __ret_205; \ - float32x2_t __s0_205 = __p0_205; \ - float32x2_t __s1_205 = __p1_205; \ - float32x4_t __s2_205 = __p2_205; \ -float32x4_t __reint_205 = __s2_205; \ -uint64x1_t __reint1_205 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \ - __ret_205 = vcmla_rot90_f32(__s0_205, __s1_205, *(float32x2_t *) &__reint1_205); \ - __ret_205; \ -}) -#else -#define vcmla_rot90_laneq_f32(__p0_206, 
__p1_206, __p2_206, __p3_206) __extension__ ({ \ - float32x2_t __ret_206; \ - float32x2_t __s0_206 = __p0_206; \ - float32x2_t __s1_206 = __p1_206; \ - float32x4_t __s2_206 = __p2_206; \ - float32x2_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 1, 0); \ - float32x2_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 1, 0); \ - float32x4_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 3, 2, 1, 0); \ -float32x4_t __reint_206 = __rev2_206; \ -uint64x1_t __reint1_206 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_206, __p3_206)}; \ - __ret_206 = __noswap_vcmla_rot90_f32(__rev0_206, __rev1_206, *(float32x2_t *) &__reint1_206); \ - __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 1, 0); \ - __ret_206; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ - float32x4_t __ret_207; \ - float32x4_t __s0_207 = __p0_207; \ - float32x4_t __s1_207 = __p1_207; \ - float32x4_t __s2_207 = __p2_207; \ -float32x4_t __reint_207 = __s2_207; \ -uint64x2_t __reint1_207 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207), vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207)}; \ - __ret_207 = vcmlaq_rot90_f32(__s0_207, __s1_207, *(float32x4_t *) &__reint1_207); \ - __ret_207; \ -}) -#else -#define vcmlaq_rot90_laneq_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ - float32x4_t __ret_208; \ - float32x4_t __s0_208 = __p0_208; \ - float32x4_t __s1_208 = __p1_208; \ - float32x4_t __s2_208 = __p2_208; \ - float32x4_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \ - float32x4_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \ - float32x4_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 3, 2, 1, 0); \ -float32x4_t __reint_208 = __rev2_208; \ -uint64x2_t __reint1_208 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208)}; \ - __ret_208 = __noswap_vcmlaq_rot90_f32(__rev0_208, __rev1_208, *(float32x4_t *) &__reint1_208); \ - __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \ - __ret_208; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -36788,13 +35739,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(floa #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) 
__builtin_neon_vcadd_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -36805,13 +35756,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -36822,13 +35773,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(flo #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -36839,13 +35790,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(floa #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -36854,7 +35805,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 
2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; @@ -36862,13 +35813,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_f16(f #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -36877,7 +35828,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; @@ -36885,125 +35836,125 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_f16(fl #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ - float16x4_t __ret_209; \ - float16x4_t __s0_209 = __p0_209; \ - float16x4_t __s1_209 = __p1_209; \ - float16x4_t __s2_209 = __p2_209; \ -float16x4_t __reint_209 = __s2_209; \ -uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ - __ret_209 = vcmla_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ - __ret_209; \ +#define vcmla_lane_f16(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \ + float16x4_t __ret_150; \ + float16x4_t __s0_150 = __p0_150; \ + float16x4_t __s1_150 = __p1_150; \ + float16x4_t __s2_150 = __p2_150; \ +float16x4_t __reint_150 = __s2_150; \ +uint32x2_t __reint1_150 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150), vget_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150)}; \ + __ret_150 = vcmla_f16(__s0_150, __s1_150, *(float16x4_t *) &__reint1_150); \ + __ret_150; \ }) #else -#define vcmla_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ - float16x4_t __ret_210; \ - float16x4_t __s0_210 = __p0_210; \ - float16x4_t __s1_210 = __p1_210; \ - float16x4_t __s2_210 = __p2_210; \ - float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ - float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, 
__s1_210, 3, 2, 1, 0); \ - float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ -float16x4_t __reint_210 = __rev2_210; \ -uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ - __ret_210 = __noswap_vcmla_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ - __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ - __ret_210; \ +#define vcmla_lane_f16(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \ + float16x4_t __ret_151; \ + float16x4_t __s0_151 = __p0_151; \ + float16x4_t __s1_151 = __p1_151; \ + float16x4_t __s2_151 = __p2_151; \ + float16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__s0_151, __s0_151, 3, 2, 1, 0); \ + float16x4_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \ + float16x4_t __rev2_151; __rev2_151 = __builtin_shufflevector(__s2_151, __s2_151, 3, 2, 1, 0); \ +float16x4_t __reint_151 = __rev2_151; \ +uint32x2_t __reint1_151 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_151, __p3_151), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_151, __p3_151)}; \ + __ret_151 = __noswap_vcmla_f16(__rev0_151, __rev1_151, *(float16x4_t *) &__reint1_151); \ + __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0); \ + __ret_151; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ - float16x8_t __ret_211; \ - float16x8_t __s0_211 = __p0_211; \ - float16x8_t __s1_211 = __p1_211; \ - float16x4_t __s2_211 = __p2_211; \ -float16x4_t __reint_211 = __s2_211; \ -uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ - __ret_211 = vcmlaq_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ - __ret_211; \ +#define vcmlaq_lane_f16(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \ + float16x8_t __ret_152; \ + float16x8_t __s0_152 = __p0_152; \ + float16x8_t __s1_152 = __p1_152; \ + float16x4_t __s2_152 = __p2_152; \ +float16x4_t __reint_152 = __s2_152; \ +uint32x4_t __reint1_152 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152)}; \ + __ret_152 = vcmlaq_f16(__s0_152, __s1_152, *(float16x8_t *) &__reint1_152); \ + __ret_152; \ }) #else -#define vcmlaq_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ - float16x8_t __ret_212; \ - float16x8_t __s0_212 = __p0_212; \ - float16x8_t __s1_212 = __p1_212; \ - float16x4_t __s2_212 = __p2_212; \ - float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ -float16x4_t __reint_212 = __rev2_212; \ -uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), 
__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ - __ret_212 = __noswap_vcmlaq_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ - __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_212; \ +#define vcmlaq_lane_f16(__p0_153, __p1_153, __p2_153, __p3_153) __extension__ ({ \ + float16x8_t __ret_153; \ + float16x8_t __s0_153 = __p0_153; \ + float16x8_t __s1_153 = __p1_153; \ + float16x4_t __s2_153 = __p2_153; \ + float16x8_t __rev0_153; __rev0_153 = __builtin_shufflevector(__s0_153, __s0_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_153; __rev2_153 = __builtin_shufflevector(__s2_153, __s2_153, 3, 2, 1, 0); \ +float16x4_t __reint_153 = __rev2_153; \ +uint32x4_t __reint1_153 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153)}; \ + __ret_153 = __noswap_vcmlaq_f16(__rev0_153, __rev1_153, *(float16x8_t *) &__reint1_153); \ + __ret_153 = __builtin_shufflevector(__ret_153, __ret_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_153; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ - float16x4_t __ret_213; \ - float16x4_t __s0_213 = __p0_213; \ - float16x4_t __s1_213 = __p1_213; \ - float16x8_t __s2_213 = __p2_213; \ -float16x8_t __reint_213 = __s2_213; \ -uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ - __ret_213 = vcmla_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ - __ret_213; \ +#define vcmla_laneq_f16(__p0_154, __p1_154, __p2_154, __p3_154) __extension__ ({ \ + float16x4_t __ret_154; \ + float16x4_t __s0_154 = __p0_154; \ + float16x4_t __s1_154 = __p1_154; \ + float16x8_t __s2_154 = __p2_154; \ +float16x8_t __reint_154 = __s2_154; \ +uint32x2_t __reint1_154 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_154, __p3_154), vgetq_lane_u32(*(uint32x4_t *) &__reint_154, __p3_154)}; \ + __ret_154 = vcmla_f16(__s0_154, __s1_154, *(float16x4_t *) &__reint1_154); \ + __ret_154; \ }) #else -#define vcmla_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ - float16x4_t __ret_214; \ - float16x4_t __s0_214 = __p0_214; \ - float16x4_t __s1_214 = __p1_214; \ - float16x8_t __s2_214 = __p2_214; \ - float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ - float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ - float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_214 = __rev2_214; \ -uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ - __ret_214 = __noswap_vcmla_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ - __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ - __ret_214; \ +#define vcmla_laneq_f16(__p0_155, __p1_155, __p2_155, __p3_155) __extension__ ({ \ + float16x4_t __ret_155; \ + float16x4_t __s0_155 = __p0_155; \ + float16x4_t __s1_155 = __p1_155; \ + float16x8_t __s2_155 = __p2_155; \ + 
float16x4_t __rev0_155; __rev0_155 = __builtin_shufflevector(__s0_155, __s0_155, 3, 2, 1, 0); \ + float16x4_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \ + float16x8_t __rev2_155; __rev2_155 = __builtin_shufflevector(__s2_155, __s2_155, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_155 = __rev2_155; \ +uint32x2_t __reint1_155 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_155, __p3_155), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_155, __p3_155)}; \ + __ret_155 = __noswap_vcmla_f16(__rev0_155, __rev1_155, *(float16x4_t *) &__reint1_155); \ + __ret_155 = __builtin_shufflevector(__ret_155, __ret_155, 3, 2, 1, 0); \ + __ret_155; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ - float16x8_t __ret_215; \ - float16x8_t __s0_215 = __p0_215; \ - float16x8_t __s1_215 = __p1_215; \ - float16x8_t __s2_215 = __p2_215; \ -float16x8_t __reint_215 = __s2_215; \ -uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ - __ret_215 = vcmlaq_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ - __ret_215; \ +#define vcmlaq_laneq_f16(__p0_156, __p1_156, __p2_156, __p3_156) __extension__ ({ \ + float16x8_t __ret_156; \ + float16x8_t __s0_156 = __p0_156; \ + float16x8_t __s1_156 = __p1_156; \ + float16x8_t __s2_156 = __p2_156; \ +float16x8_t __reint_156 = __s2_156; \ +uint32x4_t __reint1_156 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156)}; \ + __ret_156 = vcmlaq_f16(__s0_156, __s1_156, *(float16x8_t *) &__reint1_156); \ + __ret_156; \ }) #else -#define vcmlaq_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ - float16x8_t __ret_216; \ - float16x8_t __s0_216 = __p0_216; \ - float16x8_t __s1_216 = __p1_216; \ - float16x8_t __s2_216 = __p2_216; \ - float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_216 = __rev2_216; \ -uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ - __ret_216 = __noswap_vcmlaq_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ - __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_216; \ +#define vcmlaq_laneq_f16(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ + float16x8_t __ret_157; \ + float16x8_t __s0_157 = __p0_157; \ + float16x8_t __s1_157 = __p1_157; \ + float16x8_t __s2_157 = __p2_157; \ + float16x8_t __rev0_157; __rev0_157 = __builtin_shufflevector(__s0_157, __s0_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t 
__rev2_157; __rev2_157 = __builtin_shufflevector(__s2_157, __s2_157, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_157 = __rev2_157; \ +uint32x4_t __reint1_157 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157)}; \ + __ret_157 = __noswap_vcmlaq_f16(__rev0_157, __rev1_157, *(float16x8_t *) &__reint1_157); \ + __ret_157 = __builtin_shufflevector(__ret_157, __ret_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_157; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -37012,7 +35963,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(flo __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; @@ -37020,13 +35971,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot18 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -37035,7 +35986,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(floa __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai 
__attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; @@ -37043,125 +35994,125 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot180 #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \ - float16x4_t __ret_217; \ - float16x4_t __s0_217 = __p0_217; \ - float16x4_t __s1_217 = __p1_217; \ - float16x4_t __s2_217 = __p2_217; \ -float16x4_t __reint_217 = __s2_217; \ -uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ - __ret_217 = vcmla_rot180_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \ - __ret_217; \ +#define vcmla_rot180_lane_f16(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ + float16x4_t __ret_158; \ + float16x4_t __s0_158 = __p0_158; \ + float16x4_t __s1_158 = __p1_158; \ + float16x4_t __s2_158 = __p2_158; \ +float16x4_t __reint_158 = __s2_158; \ +uint32x2_t __reint1_158 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_158, __p3_158), vget_lane_u32(*(uint32x2_t *) &__reint_158, __p3_158)}; \ + __ret_158 = vcmla_rot180_f16(__s0_158, __s1_158, *(float16x4_t *) &__reint1_158); \ + __ret_158; \ }) #else -#define vcmla_rot180_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ - float16x4_t __ret_218; \ - float16x4_t __s0_218 = __p0_218; \ - float16x4_t __s1_218 = __p1_218; \ - float16x4_t __s2_218 = __p2_218; \ - float16x4_t __rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ - float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ - float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ -float16x4_t __reint_218 = __rev2_218; \ -uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ - __ret_218 = __noswap_vcmla_rot180_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ - __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ - __ret_218; \ +#define vcmla_rot180_lane_f16(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ + float16x4_t __ret_159; \ + float16x4_t __s0_159 = __p0_159; \ + float16x4_t __s1_159 = __p1_159; \ + float16x4_t __s2_159 = __p2_159; \ + float16x4_t __rev0_159; __rev0_159 = __builtin_shufflevector(__s0_159, __s0_159, 3, 2, 1, 0); \ + float16x4_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 3, 2, 1, 0); \ + float16x4_t __rev2_159; __rev2_159 = __builtin_shufflevector(__s2_159, __s2_159, 3, 2, 1, 0); \ +float16x4_t __reint_159 = __rev2_159; \ +uint32x2_t __reint1_159 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_159, __p3_159), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_159, __p3_159)}; \ + __ret_159 = __noswap_vcmla_rot180_f16(__rev0_159, __rev1_159, *(float16x4_t *) &__reint1_159); \ + __ret_159 = __builtin_shufflevector(__ret_159, __ret_159, 3, 2, 1, 0); \ + __ret_159; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ - float16x8_t __ret_219; \ - float16x8_t __s0_219 = __p0_219; \ - float16x8_t __s1_219 = __p1_219; \ - 
float16x4_t __s2_219 = __p2_219; \ -float16x4_t __reint_219 = __s2_219; \ -uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ - __ret_219 = vcmlaq_rot180_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ - __ret_219; \ +#define vcmlaq_rot180_lane_f16(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ + float16x8_t __ret_160; \ + float16x8_t __s0_160 = __p0_160; \ + float16x8_t __s1_160 = __p1_160; \ + float16x4_t __s2_160 = __p2_160; \ +float16x4_t __reint_160 = __s2_160; \ +uint32x4_t __reint1_160 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160)}; \ + __ret_160 = vcmlaq_rot180_f16(__s0_160, __s1_160, *(float16x8_t *) &__reint1_160); \ + __ret_160; \ }) #else -#define vcmlaq_rot180_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \ - float16x8_t __ret_220; \ - float16x8_t __s0_220 = __p0_220; \ - float16x8_t __s1_220 = __p1_220; \ - float16x4_t __s2_220 = __p2_220; \ - float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ -float16x4_t __reint_220 = __rev2_220; \ -uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ - __ret_220 = __noswap_vcmlaq_rot180_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ - __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_220; \ +#define vcmlaq_rot180_lane_f16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ + float16x8_t __ret_161; \ + float16x8_t __s0_161 = __p0_161; \ + float16x8_t __s1_161 = __p1_161; \ + float16x4_t __s2_161 = __p2_161; \ + float16x8_t __rev0_161; __rev0_161 = __builtin_shufflevector(__s0_161, __s0_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_161; __rev2_161 = __builtin_shufflevector(__s2_161, __s2_161, 3, 2, 1, 0); \ +float16x4_t __reint_161 = __rev2_161; \ +uint32x4_t __reint1_161 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161)}; \ + __ret_161 = __noswap_vcmlaq_rot180_f16(__rev0_161, __rev1_161, *(float16x8_t *) &__reint1_161); \ + __ret_161 = __builtin_shufflevector(__ret_161, __ret_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_161; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ - float16x4_t __ret_221; \ - float16x4_t __s0_221 = __p0_221; \ - float16x4_t __s1_221 = __p1_221; \ - float16x8_t __s2_221 = __p2_221; \ -float16x8_t __reint_221 = 
__s2_221; \ -uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ - __ret_221 = vcmla_rot180_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ - __ret_221; \ +#define vcmla_rot180_laneq_f16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ + float16x4_t __ret_162; \ + float16x4_t __s0_162 = __p0_162; \ + float16x4_t __s1_162 = __p1_162; \ + float16x8_t __s2_162 = __p2_162; \ +float16x8_t __reint_162 = __s2_162; \ +uint32x2_t __reint1_162 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_162, __p3_162), vgetq_lane_u32(*(uint32x4_t *) &__reint_162, __p3_162)}; \ + __ret_162 = vcmla_rot180_f16(__s0_162, __s1_162, *(float16x4_t *) &__reint1_162); \ + __ret_162; \ }) #else -#define vcmla_rot180_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ - float16x4_t __ret_222; \ - float16x4_t __s0_222 = __p0_222; \ - float16x4_t __s1_222 = __p1_222; \ - float16x8_t __s2_222 = __p2_222; \ - float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ - float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ - float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_222 = __rev2_222; \ -uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ - __ret_222 = __noswap_vcmla_rot180_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ - __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ - __ret_222; \ +#define vcmla_rot180_laneq_f16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ + float16x4_t __ret_163; \ + float16x4_t __s0_163 = __p0_163; \ + float16x4_t __s1_163 = __p1_163; \ + float16x8_t __s2_163 = __p2_163; \ + float16x4_t __rev0_163; __rev0_163 = __builtin_shufflevector(__s0_163, __s0_163, 3, 2, 1, 0); \ + float16x4_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \ + float16x8_t __rev2_163; __rev2_163 = __builtin_shufflevector(__s2_163, __s2_163, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_163 = __rev2_163; \ +uint32x2_t __reint1_163 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_163, __p3_163), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_163, __p3_163)}; \ + __ret_163 = __noswap_vcmla_rot180_f16(__rev0_163, __rev1_163, *(float16x4_t *) &__reint1_163); \ + __ret_163 = __builtin_shufflevector(__ret_163, __ret_163, 3, 2, 1, 0); \ + __ret_163; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ - float16x8_t __ret_223; \ - float16x8_t __s0_223 = __p0_223; \ - float16x8_t __s1_223 = __p1_223; \ - float16x8_t __s2_223 = __p2_223; \ -float16x8_t __reint_223 = __s2_223; \ -uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ - __ret_223 = vcmlaq_rot180_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ - __ret_223; \ +#define vcmlaq_rot180_laneq_f16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ + float16x8_t __ret_164; \ + float16x8_t __s0_164 = __p0_164; \ + float16x8_t __s1_164 = __p1_164; \ + 
float16x8_t __s2_164 = __p2_164; \ +float16x8_t __reint_164 = __s2_164; \ +uint32x4_t __reint1_164 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164)}; \ + __ret_164 = vcmlaq_rot180_f16(__s0_164, __s1_164, *(float16x8_t *) &__reint1_164); \ + __ret_164; \ }) #else -#define vcmlaq_rot180_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ - float16x8_t __ret_224; \ - float16x8_t __s0_224 = __p0_224; \ - float16x8_t __s1_224 = __p1_224; \ - float16x8_t __s2_224 = __p2_224; \ - float16x8_t __rev0_224; __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_224 = __rev2_224; \ -uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \ - __ret_224 = __noswap_vcmlaq_rot180_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ - __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_224; \ +#define vcmlaq_rot180_laneq_f16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ + float16x8_t __ret_165; \ + float16x8_t __s0_165 = __p0_165; \ + float16x8_t __s1_165 = __p1_165; \ + float16x8_t __s2_165 = __p2_165; \ + float16x8_t __rev0_165; __rev0_165 = __builtin_shufflevector(__s0_165, __s0_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_165; __rev2_165 = __builtin_shufflevector(__s2_165, __s2_165, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_165 = __rev2_165; \ +uint32x4_t __reint1_165 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165)}; \ + __ret_165 = __noswap_vcmlaq_rot180_f16(__rev0_165, __rev1_165, *(float16x8_t *) &__reint1_165); \ + __ret_165 = __builtin_shufflevector(__ret_165, __ret_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_165; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -37170,7 +36121,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(flo __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; @@ -37178,13 +36129,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot27 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -37193,7 +36144,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(floa __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; @@ -37201,125 +36152,125 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot270 #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ - float16x4_t __ret_225; \ - float16x4_t __s0_225 = __p0_225; \ - float16x4_t __s1_225 = __p1_225; \ - float16x4_t __s2_225 = __p2_225; \ -float16x4_t __reint_225 = __s2_225; \ -uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ - __ret_225 = vcmla_rot270_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ - __ret_225; \ +#define vcmla_rot270_lane_f16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ + float16x4_t __ret_166; \ + float16x4_t __s0_166 = __p0_166; \ + float16x4_t __s1_166 = __p1_166; \ + float16x4_t __s2_166 = __p2_166; \ +float16x4_t __reint_166 = __s2_166; \ +uint32x2_t __reint1_166 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_166, __p3_166), vget_lane_u32(*(uint32x2_t *) &__reint_166, __p3_166)}; \ + __ret_166 = vcmla_rot270_f16(__s0_166, __s1_166, *(float16x4_t *) &__reint1_166); \ + __ret_166; \ }) #else -#define vcmla_rot270_lane_f16(__p0_226, __p1_226, __p2_226, 
__p3_226) __extension__ ({ \ - float16x4_t __ret_226; \ - float16x4_t __s0_226 = __p0_226; \ - float16x4_t __s1_226 = __p1_226; \ - float16x4_t __s2_226 = __p2_226; \ - float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ - float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ - float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ -float16x4_t __reint_226 = __rev2_226; \ -uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ - __ret_226 = __noswap_vcmla_rot270_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ - __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ - __ret_226; \ +#define vcmla_rot270_lane_f16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ + float16x4_t __ret_167; \ + float16x4_t __s0_167 = __p0_167; \ + float16x4_t __s1_167 = __p1_167; \ + float16x4_t __s2_167 = __p2_167; \ + float16x4_t __rev0_167; __rev0_167 = __builtin_shufflevector(__s0_167, __s0_167, 3, 2, 1, 0); \ + float16x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \ + float16x4_t __rev2_167; __rev2_167 = __builtin_shufflevector(__s2_167, __s2_167, 3, 2, 1, 0); \ +float16x4_t __reint_167 = __rev2_167; \ +uint32x2_t __reint1_167 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_167, __p3_167), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_167, __p3_167)}; \ + __ret_167 = __noswap_vcmla_rot270_f16(__rev0_167, __rev1_167, *(float16x4_t *) &__reint1_167); \ + __ret_167 = __builtin_shufflevector(__ret_167, __ret_167, 3, 2, 1, 0); \ + __ret_167; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ - float16x8_t __ret_227; \ - float16x8_t __s0_227 = __p0_227; \ - float16x8_t __s1_227 = __p1_227; \ - float16x4_t __s2_227 = __p2_227; \ -float16x4_t __reint_227 = __s2_227; \ -uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ - __ret_227 = vcmlaq_rot270_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \ - __ret_227; \ +#define vcmlaq_rot270_lane_f16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ + float16x8_t __ret_168; \ + float16x8_t __s0_168 = __p0_168; \ + float16x8_t __s1_168 = __p1_168; \ + float16x4_t __s2_168 = __p2_168; \ +float16x4_t __reint_168 = __s2_168; \ +uint32x4_t __reint1_168 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168)}; \ + __ret_168 = vcmlaq_rot270_f16(__s0_168, __s1_168, *(float16x8_t *) &__reint1_168); \ + __ret_168; \ }) #else -#define vcmlaq_rot270_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ - float16x8_t __ret_228; \ - float16x8_t __s0_228 = __p0_228; \ - float16x8_t __s1_228 = __p1_228; \ - float16x4_t __s2_228 = __p2_228; \ - float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - 
float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ -float16x4_t __reint_228 = __rev2_228; \ -uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ - __ret_228 = __noswap_vcmlaq_rot270_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ - __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_228; \ +#define vcmlaq_rot270_lane_f16(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ + float16x8_t __ret_169; \ + float16x8_t __s0_169 = __p0_169; \ + float16x8_t __s1_169 = __p1_169; \ + float16x4_t __s2_169 = __p2_169; \ + float16x8_t __rev0_169; __rev0_169 = __builtin_shufflevector(__s0_169, __s0_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_169; __rev2_169 = __builtin_shufflevector(__s2_169, __s2_169, 3, 2, 1, 0); \ +float16x4_t __reint_169 = __rev2_169; \ +uint32x4_t __reint1_169 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169)}; \ + __ret_169 = __noswap_vcmlaq_rot270_f16(__rev0_169, __rev1_169, *(float16x8_t *) &__reint1_169); \ + __ret_169 = __builtin_shufflevector(__ret_169, __ret_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_169; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ - float16x4_t __ret_229; \ - float16x4_t __s0_229 = __p0_229; \ - float16x4_t __s1_229 = __p1_229; \ - float16x8_t __s2_229 = __p2_229; \ -float16x8_t __reint_229 = __s2_229; \ -uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ - __ret_229 = vcmla_rot270_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ - __ret_229; \ +#define vcmla_rot270_laneq_f16(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ + float16x4_t __ret_170; \ + float16x4_t __s0_170 = __p0_170; \ + float16x4_t __s1_170 = __p1_170; \ + float16x8_t __s2_170 = __p2_170; \ +float16x8_t __reint_170 = __s2_170; \ +uint32x2_t __reint1_170 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_170, __p3_170), vgetq_lane_u32(*(uint32x4_t *) &__reint_170, __p3_170)}; \ + __ret_170 = vcmla_rot270_f16(__s0_170, __s1_170, *(float16x4_t *) &__reint1_170); \ + __ret_170; \ }) #else -#define vcmla_rot270_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ - float16x4_t __ret_230; \ - float16x4_t __s0_230 = __p0_230; \ - float16x4_t __s1_230 = __p1_230; \ - float16x8_t __s2_230 = __p2_230; \ - float16x4_t __rev0_230; __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ - float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ - float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_230 = __rev2_230; \ -uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) 
&__reint_230, __p3_230)}; \ - __ret_230 = __noswap_vcmla_rot270_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ - __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \ - __ret_230; \ +#define vcmla_rot270_laneq_f16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ + float16x4_t __ret_171; \ + float16x4_t __s0_171 = __p0_171; \ + float16x4_t __s1_171 = __p1_171; \ + float16x8_t __s2_171 = __p2_171; \ + float16x4_t __rev0_171; __rev0_171 = __builtin_shufflevector(__s0_171, __s0_171, 3, 2, 1, 0); \ + float16x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \ + float16x8_t __rev2_171; __rev2_171 = __builtin_shufflevector(__s2_171, __s2_171, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_171 = __rev2_171; \ +uint32x2_t __reint1_171 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_171, __p3_171), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_171, __p3_171)}; \ + __ret_171 = __noswap_vcmla_rot270_f16(__rev0_171, __rev1_171, *(float16x4_t *) &__reint1_171); \ + __ret_171 = __builtin_shufflevector(__ret_171, __ret_171, 3, 2, 1, 0); \ + __ret_171; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ - float16x8_t __ret_231; \ - float16x8_t __s0_231 = __p0_231; \ - float16x8_t __s1_231 = __p1_231; \ - float16x8_t __s2_231 = __p2_231; \ -float16x8_t __reint_231 = __s2_231; \ -uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \ - __ret_231 = vcmlaq_rot270_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \ - __ret_231; \ +#define vcmlaq_rot270_laneq_f16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ + float16x8_t __ret_172; \ + float16x8_t __s0_172 = __p0_172; \ + float16x8_t __s1_172 = __p1_172; \ + float16x8_t __s2_172 = __p2_172; \ +float16x8_t __reint_172 = __s2_172; \ +uint32x4_t __reint1_172 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172)}; \ + __ret_172 = vcmlaq_rot270_f16(__s0_172, __s1_172, *(float16x8_t *) &__reint1_172); \ + __ret_172; \ }) #else -#define vcmlaq_rot270_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ - float16x8_t __ret_232; \ - float16x8_t __s0_232 = __p0_232; \ - float16x8_t __s1_232 = __p1_232; \ - float16x8_t __s2_232 = __p2_232; \ - float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_232; __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_232 = __rev2_232; \ -uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \ - __ret_232 = __noswap_vcmlaq_rot270_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \ - __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 
3, 2, 1, 0); \ - __ret_232; \ +#define vcmlaq_rot270_laneq_f16(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ + float16x8_t __ret_173; \ + float16x8_t __s0_173 = __p0_173; \ + float16x8_t __s1_173 = __p1_173; \ + float16x8_t __s2_173 = __p2_173; \ + float16x8_t __rev0_173; __rev0_173 = __builtin_shufflevector(__s0_173, __s0_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_173; __rev2_173 = __builtin_shufflevector(__s2_173, __s2_173, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_173 = __rev2_173; \ +uint32x4_t __reint1_173 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173)}; \ + __ret_173 = __noswap_vcmlaq_rot270_f16(__rev0_173, __rev1_173, *(float16x8_t *) &__reint1_173); \ + __ret_173 = __builtin_shufflevector(__ret_173, __ret_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_173; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -37328,7 +36279,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(floa __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; @@ -37336,13 +36287,13 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot90 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; float16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -37351,7 +36302,7 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; @@ -37359,1621 +36310,833 @@ __ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot90_ #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_lane_f16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ - float16x4_t __ret_233; \ - float16x4_t __s0_233 = __p0_233; \ - float16x4_t __s1_233 = __p1_233; \ - float16x4_t __s2_233 = __p2_233; \ -float16x4_t __reint_233 = __s2_233; \ -uint32x2_t __reint1_233 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233), vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233)}; \ - __ret_233 = vcmla_rot90_f16(__s0_233, __s1_233, *(float16x4_t *) &__reint1_233); \ - __ret_233; \ +#define vcmla_rot90_lane_f16(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ + float16x4_t __ret_174; \ + float16x4_t __s0_174 = __p0_174; \ + float16x4_t __s1_174 = __p1_174; \ + float16x4_t __s2_174 = __p2_174; \ +float16x4_t __reint_174 = __s2_174; \ +uint32x2_t __reint1_174 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174), vget_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174)}; \ + __ret_174 = vcmla_rot90_f16(__s0_174, __s1_174, *(float16x4_t *) &__reint1_174); \ + __ret_174; \ }) #else -#define vcmla_rot90_lane_f16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ - float16x4_t __ret_234; \ - float16x4_t __s0_234 = __p0_234; \ - float16x4_t __s1_234 = __p1_234; \ - float16x4_t __s2_234 = __p2_234; \ - float16x4_t __rev0_234; __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \ - float16x4_t __rev1_234; __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \ - float16x4_t __rev2_234; __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \ -float16x4_t __reint_234 = __rev2_234; \ -uint32x2_t __reint1_234 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234)}; \ - __ret_234 = __noswap_vcmla_rot90_f16(__rev0_234, __rev1_234, *(float16x4_t *) &__reint1_234); \ - __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \ - __ret_234; \ +#define vcmla_rot90_lane_f16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ + float16x4_t __ret_175; \ + float16x4_t __s0_175 = __p0_175; \ + float16x4_t __s1_175 = __p1_175; \ + float16x4_t __s2_175 = __p2_175; \ + float16x4_t __rev0_175; __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, 3, 2, 1, 0); \ + float16x4_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 3, 2, 1, 0); \ + float16x4_t __rev2_175; __rev2_175 = __builtin_shufflevector(__s2_175, __s2_175, 3, 2, 1, 0); \ +float16x4_t __reint_175 = __rev2_175; \ +uint32x2_t __reint1_175 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_175, __p3_175), 
__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_175, __p3_175)}; \ + __ret_175 = __noswap_vcmla_rot90_f16(__rev0_175, __rev1_175, *(float16x4_t *) &__reint1_175); \ + __ret_175 = __builtin_shufflevector(__ret_175, __ret_175, 3, 2, 1, 0); \ + __ret_175; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ - float16x8_t __ret_235; \ - float16x8_t __s0_235 = __p0_235; \ - float16x8_t __s1_235 = __p1_235; \ - float16x4_t __s2_235 = __p2_235; \ -float16x4_t __reint_235 = __s2_235; \ -uint32x4_t __reint1_235 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235)}; \ - __ret_235 = vcmlaq_rot90_f16(__s0_235, __s1_235, *(float16x8_t *) &__reint1_235); \ - __ret_235; \ +#define vcmlaq_rot90_lane_f16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ + float16x8_t __ret_176; \ + float16x8_t __s0_176 = __p0_176; \ + float16x8_t __s1_176 = __p1_176; \ + float16x4_t __s2_176 = __p2_176; \ +float16x4_t __reint_176 = __s2_176; \ +uint32x4_t __reint1_176 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176)}; \ + __ret_176 = vcmlaq_rot90_f16(__s0_176, __s1_176, *(float16x8_t *) &__reint1_176); \ + __ret_176; \ }) #else -#define vcmlaq_rot90_lane_f16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ - float16x8_t __ret_236; \ - float16x8_t __s0_236 = __p0_236; \ - float16x8_t __s1_236 = __p1_236; \ - float16x4_t __s2_236 = __p2_236; \ - float16x8_t __rev0_236; __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_236; __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_236; __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 3, 2, 1, 0); \ -float16x4_t __reint_236 = __rev2_236; \ -uint32x4_t __reint1_236 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236)}; \ - __ret_236 = __noswap_vcmlaq_rot90_f16(__rev0_236, __rev1_236, *(float16x8_t *) &__reint1_236); \ - __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_236; \ +#define vcmlaq_rot90_lane_f16(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ + float16x8_t __ret_177; \ + float16x8_t __s0_177 = __p0_177; \ + float16x8_t __s1_177 = __p1_177; \ + float16x4_t __s2_177 = __p2_177; \ + float16x8_t __rev0_177; __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_177; __rev2_177 = __builtin_shufflevector(__s2_177, __s2_177, 3, 2, 1, 0); \ +float16x4_t __reint_177 = __rev2_177; \ +uint32x4_t __reint1_177 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177)}; \ + __ret_177 = 
__noswap_vcmlaq_rot90_f16(__rev0_177, __rev1_177, *(float16x8_t *) &__reint1_177); \ + __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_177; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ - float16x4_t __ret_237; \ - float16x4_t __s0_237 = __p0_237; \ - float16x4_t __s1_237 = __p1_237; \ - float16x8_t __s2_237 = __p2_237; \ -float16x8_t __reint_237 = __s2_237; \ -uint32x2_t __reint1_237 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237), vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237)}; \ - __ret_237 = vcmla_rot90_f16(__s0_237, __s1_237, *(float16x4_t *) &__reint1_237); \ - __ret_237; \ +#define vcmla_rot90_laneq_f16(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ + float16x4_t __ret_178; \ + float16x4_t __s0_178 = __p0_178; \ + float16x4_t __s1_178 = __p1_178; \ + float16x8_t __s2_178 = __p2_178; \ +float16x8_t __reint_178 = __s2_178; \ +uint32x2_t __reint1_178 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_178, __p3_178), vgetq_lane_u32(*(uint32x4_t *) &__reint_178, __p3_178)}; \ + __ret_178 = vcmla_rot90_f16(__s0_178, __s1_178, *(float16x4_t *) &__reint1_178); \ + __ret_178; \ }) #else -#define vcmla_rot90_laneq_f16(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ - float16x4_t __ret_238; \ - float16x4_t __s0_238 = __p0_238; \ - float16x4_t __s1_238 = __p1_238; \ - float16x8_t __s2_238 = __p2_238; \ - float16x4_t __rev0_238; __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 3, 2, 1, 0); \ - float16x4_t __rev1_238; __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 3, 2, 1, 0); \ - float16x8_t __rev2_238; __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_238 = __rev2_238; \ -uint32x2_t __reint1_238 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238)}; \ - __ret_238 = __noswap_vcmla_rot90_f16(__rev0_238, __rev1_238, *(float16x4_t *) &__reint1_238); \ - __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 3, 2, 1, 0); \ - __ret_238; \ +#define vcmla_rot90_laneq_f16(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ + float16x4_t __ret_179; \ + float16x4_t __s0_179 = __p0_179; \ + float16x4_t __s1_179 = __p1_179; \ + float16x8_t __s2_179 = __p2_179; \ + float16x4_t __rev0_179; __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 3, 2, 1, 0); \ + float16x4_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 3, 2, 1, 0); \ + float16x8_t __rev2_179; __rev2_179 = __builtin_shufflevector(__s2_179, __s2_179, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_179 = __rev2_179; \ +uint32x2_t __reint1_179 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_179, __p3_179), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_179, __p3_179)}; \ + __ret_179 = __noswap_vcmla_rot90_f16(__rev0_179, __rev1_179, *(float16x4_t *) &__reint1_179); \ + __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \ + __ret_179; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ - float16x8_t __ret_239; \ - float16x8_t __s0_239 = __p0_239; \ - float16x8_t __s1_239 = __p1_239; \ - float16x8_t __s2_239 = __p2_239; \ -float16x8_t __reint_239 = __s2_239; \ -uint32x4_t __reint1_239 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), 
vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239)}; \ - __ret_239 = vcmlaq_rot90_f16(__s0_239, __s1_239, *(float16x8_t *) &__reint1_239); \ - __ret_239; \ +#define vcmlaq_rot90_laneq_f16(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ + float16x8_t __ret_180; \ + float16x8_t __s0_180 = __p0_180; \ + float16x8_t __s1_180 = __p1_180; \ + float16x8_t __s2_180 = __p2_180; \ +float16x8_t __reint_180 = __s2_180; \ +uint32x4_t __reint1_180 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180)}; \ + __ret_180 = vcmlaq_rot90_f16(__s0_180, __s1_180, *(float16x8_t *) &__reint1_180); \ + __ret_180; \ }) #else -#define vcmlaq_rot90_laneq_f16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ - float16x8_t __ret_240; \ - float16x8_t __s0_240 = __p0_240; \ - float16x8_t __s1_240 = __p1_240; \ - float16x8_t __s2_240 = __p2_240; \ - float16x8_t __rev0_240; __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_240; __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_240; __rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_240 = __rev2_240; \ -uint32x4_t __reint1_240 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240)}; \ - __ret_240 = __noswap_vcmlaq_rot90_f16(__rev0_240, __rev1_240, *(float16x8_t *) &__reint1_240); \ - __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_240; \ -}) -#endif - -#if !defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s32(__p0_241, __p1_241, __p2_241) __extension__ ({ \ - int32x4_t __ret_241; \ - int32x4_t __s0_241 = __p0_241; \ - int32x2_t __s1_241 = __p1_241; \ - __ret_241 = vqdmulhq_s32(__s0_241, splatq_lane_s32(__s1_241, __p2_241)); \ - __ret_241; \ -}) -#else -#define vqdmulhq_lane_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \ - int32x4_t __ret_242; \ - int32x4_t __s0_242 = __p0_242; \ - int32x2_t __s1_242 = __p1_242; \ - int32x4_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \ - int32x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ - __ret_242 = __noswap_vqdmulhq_s32(__rev0_242, __noswap_splatq_lane_s32(__rev1_242, __p2_242)); \ - __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \ - __ret_242; \ +#define vcmlaq_rot90_laneq_f16(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ + float16x8_t __ret_181; \ + float16x8_t __s0_181 = __p0_181; \ + float16x8_t __s1_181 = __p1_181; \ + float16x8_t __s2_181 = __p2_181; \ + float16x8_t __rev0_181; __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_181; __rev2_181 = __builtin_shufflevector(__s2_181, __s2_181, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_181 = __rev2_181; \ +uint32x4_t __reint1_181 = (uint32x4_t) 
{__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181)}; \ + __ret_181 = __noswap_vcmlaq_rot90_f16(__rev0_181, __rev1_181, *(float16x8_t *) &__reint1_181); \ + __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_181; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s16(__p0_243, __p1_243, __p2_243) __extension__ ({ \ - int16x8_t __ret_243; \ - int16x8_t __s0_243 = __p0_243; \ - int16x4_t __s1_243 = __p1_243; \ - __ret_243 = vqdmulhq_s16(__s0_243, splatq_lane_s16(__s1_243, __p2_243)); \ - __ret_243; \ -}) -#else -#define vqdmulhq_lane_s16(__p0_244, __p1_244, __p2_244) __extension__ ({ \ - int16x8_t __ret_244; \ - int16x8_t __s0_244 = __p0_244; \ - int16x4_t __s1_244 = __p1_244; \ - int16x8_t __rev0_244; __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_244; __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 3, 2, 1, 0); \ - __ret_244 = __noswap_vqdmulhq_s16(__rev0_244, __noswap_splatq_lane_s16(__rev1_244, __p2_244)); \ - __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_244; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s32(__p0_245, __p1_245, __p2_245) __extension__ ({ \ - int32x2_t __ret_245; \ - int32x2_t __s0_245 = __p0_245; \ - int32x2_t __s1_245 = __p1_245; \ - __ret_245 = vqdmulh_s32(__s0_245, splat_lane_s32(__s1_245, __p2_245)); \ - __ret_245; \ -}) -#else -#define vqdmulh_lane_s32(__p0_246, __p1_246, __p2_246) __extension__ ({ \ - int32x2_t __ret_246; \ - int32x2_t __s0_246 = __p0_246; \ - int32x2_t __s1_246 = __p1_246; \ - int32x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ - int32x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ - __ret_246 = __noswap_vqdmulh_s32(__rev0_246, __noswap_splat_lane_s32(__rev1_246, __p2_246)); \ - __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ - __ret_246; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \ - int16x4_t __ret_247; \ - int16x4_t __s0_247 = __p0_247; \ - int16x4_t __s1_247 = __p1_247; \ - __ret_247 = vqdmulh_s16(__s0_247, splat_lane_s16(__s1_247, __p2_247)); \ - __ret_247; \ -}) -#else -#define vqdmulh_lane_s16(__p0_248, __p1_248, __p2_248) __extension__ ({ \ - int16x4_t __ret_248; \ - int16x4_t __s0_248 = __p0_248; \ - int16x4_t __s1_248 = __p1_248; \ - int16x4_t __rev0_248; __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \ - int16x4_t __rev1_248; __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \ - __ret_248 = __noswap_vqdmulh_s16(__rev0_248, __noswap_splat_lane_s16(__rev1_248, __p2_248)); \ - __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \ - __ret_248; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s32(__p0_249, __p1_249, __p2_249) __extension__ ({ \ - int32x4_t __ret_249; \ - int32x4_t __s0_249 = __p0_249; \ - int32x2_t __s1_249 = __p1_249; \ - __ret_249 = vqrdmulhq_s32(__s0_249, splatq_lane_s32(__s1_249, __p2_249)); \ - __ret_249; \ -}) -#else -#define vqrdmulhq_lane_s32(__p0_250, __p1_250, __p2_250) __extension__ ({ \ - int32x4_t __ret_250; \ - int32x4_t __s0_250 = __p0_250; \ - int32x2_t __s1_250 = __p1_250; \ - int32x4_t 
__rev0_250; __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 3, 2, 1, 0); \ - int32x2_t __rev1_250; __rev1_250 = __builtin_shufflevector(__s1_250, __s1_250, 1, 0); \ - __ret_250 = __noswap_vqrdmulhq_s32(__rev0_250, __noswap_splatq_lane_s32(__rev1_250, __p2_250)); \ - __ret_250 = __builtin_shufflevector(__ret_250, __ret_250, 3, 2, 1, 0); \ - __ret_250; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s16(__p0_251, __p1_251, __p2_251) __extension__ ({ \ - int16x8_t __ret_251; \ - int16x8_t __s0_251 = __p0_251; \ - int16x4_t __s1_251 = __p1_251; \ - __ret_251 = vqrdmulhq_s16(__s0_251, splatq_lane_s16(__s1_251, __p2_251)); \ - __ret_251; \ -}) -#else -#define vqrdmulhq_lane_s16(__p0_252, __p1_252, __p2_252) __extension__ ({ \ - int16x8_t __ret_252; \ - int16x8_t __s0_252 = __p0_252; \ - int16x4_t __s1_252 = __p1_252; \ - int16x8_t __rev0_252; __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_252; __rev1_252 = __builtin_shufflevector(__s1_252, __s1_252, 3, 2, 1, 0); \ - __ret_252 = __noswap_vqrdmulhq_s16(__rev0_252, __noswap_splatq_lane_s16(__rev1_252, __p2_252)); \ - __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_252; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s32(__p0_253, __p1_253, __p2_253) __extension__ ({ \ - int32x2_t __ret_253; \ - int32x2_t __s0_253 = __p0_253; \ - int32x2_t __s1_253 = __p1_253; \ - __ret_253 = vqrdmulh_s32(__s0_253, splat_lane_s32(__s1_253, __p2_253)); \ - __ret_253; \ -}) -#else -#define vqrdmulh_lane_s32(__p0_254, __p1_254, __p2_254) __extension__ ({ \ - int32x2_t __ret_254; \ - int32x2_t __s0_254 = __p0_254; \ - int32x2_t __s1_254 = __p1_254; \ - int32x2_t __rev0_254; __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 1, 0); \ - int32x2_t __rev1_254; __rev1_254 = __builtin_shufflevector(__s1_254, __s1_254, 1, 0); \ - __ret_254 = __noswap_vqrdmulh_s32(__rev0_254, __noswap_splat_lane_s32(__rev1_254, __p2_254)); \ - __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 1, 0); \ - __ret_254; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s16(__p0_255, __p1_255, __p2_255) __extension__ ({ \ - int16x4_t __ret_255; \ - int16x4_t __s0_255 = __p0_255; \ - int16x4_t __s1_255 = __p1_255; \ - __ret_255 = vqrdmulh_s16(__s0_255, splat_lane_s16(__s1_255, __p2_255)); \ - __ret_255; \ -}) -#else -#define vqrdmulh_lane_s16(__p0_256, __p1_256, __p2_256) __extension__ ({ \ - int16x4_t __ret_256; \ - int16x4_t __s0_256 = __p0_256; \ - int16x4_t __s1_256 = __p1_256; \ - int16x4_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \ - int16x4_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 3, 2, 1, 0); \ - __ret_256 = __noswap_vqrdmulh_s16(__rev0_256, __noswap_splat_lane_s16(__rev1_256, __p2_256)); \ - __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \ - __ret_256; \ -}) -#endif - -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = 
(poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly16x8_t 
vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t 
vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t 
vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai 
float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; 
- __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t 
__p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai 
int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; - __ret = (float32x2_t)(__p0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = 
(float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - 
__ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, 
float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ + float32x2_t __ret_182; \ + float32x2_t __s0_182 = __p0_182; \ + float32x2_t __s1_182 = __p1_182; \ + float32x2_t __s2_182 = __p2_182; \ +float32x2_t __reint_182 = __s2_182; \ +uint64x1_t __reint1_182 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_182, __p3_182)}; \ + __ret_182 = vcmla_f32(__s0_182, __s1_182, *(float32x2_t *) &__reint1_182); \ + __ret_182; \ +}) +#else +#define vcmla_lane_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ + float32x2_t __ret_183; \ + float32x2_t __s0_183 = __p0_183; \ + float32x2_t __s1_183 = __p1_183; \ + float32x2_t __s2_183 = __p2_183; \ + float32x2_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 1, 0); \ + float32x2_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 1, 0); \ + float32x2_t __rev2_183; __rev2_183 = __builtin_shufflevector(__s2_183, __s2_183, 1, 0); \ +float32x2_t __reint_183 = __rev2_183; \ +uint64x1_t __reint1_183 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_183, __p3_183)}; \ + __ret_183 = __noswap_vcmla_f32(__rev0_183, __rev1_183, *(float32x2_t *) &__reint1_183); \ + __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 1, 0); \ + __ret_183; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vcmlaq_lane_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ + float32x4_t __ret_184; \ + float32x4_t __s0_184 = __p0_184; \ + float32x4_t __s1_184 = __p1_184; \ + float32x2_t __s2_184 = __p2_184; \ +float32x2_t __reint_184 = __s2_184; \ +uint64x2_t __reint1_184 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_184, __p3_184), vget_lane_u64(*(uint64x1_t *) &__reint_184, __p3_184)}; \ + __ret_184 = vcmlaq_f32(__s0_184, __s1_184, *(float32x4_t *) &__reint1_184); \ + __ret_184; \ +}) +#else +#define vcmlaq_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ + float32x4_t __ret_185; \ + float32x4_t __s0_185 = __p0_185; \ + float32x4_t __s1_185 = __p1_185; \ + float32x2_t __s2_185 = __p2_185; \ + float32x4_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 3, 2, 1, 0); \ + float32x4_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 3, 2, 1, 0); \ + float32x2_t __rev2_185; __rev2_185 = __builtin_shufflevector(__s2_185, __s2_185, 1, 0); \ +float32x2_t __reint_185 = __rev2_185; \ +uint64x2_t __reint1_185 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185), vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ + __ret_185 = __noswap_vcmlaq_f32(__rev0_185, __rev1_185, *(float32x4_t *) &__reint1_185); \ + __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \ + __ret_185; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ + float32x2_t __ret_186; \ + float32x2_t __s0_186 = __p0_186; \ + float32x2_t __s1_186 = __p1_186; \ + float32x4_t __s2_186 = __p2_186; \ +float32x4_t __reint_186 = __s2_186; \ +uint64x1_t __reint1_186 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_186, __p3_186)}; \ + __ret_186 = vcmla_f32(__s0_186, __s1_186, *(float32x2_t *) &__reint1_186); \ + __ret_186; \ +}) +#else +#define vcmla_laneq_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ + float32x2_t __ret_187; \ + float32x2_t __s0_187 = __p0_187; \ + float32x2_t __s1_187 = __p1_187; \ + float32x4_t __s2_187 = __p2_187; \ + float32x2_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 1, 0); \ + float32x2_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 1, 0); \ + float32x4_t __rev2_187; __rev2_187 = __builtin_shufflevector(__s2_187, __s2_187, 3, 2, 1, 0); \ +float32x4_t __reint_187 = __rev2_187; \ +uint64x1_t __reint1_187 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_187, __p3_187)}; \ + __ret_187 = __noswap_vcmla_f32(__rev0_187, __rev1_187, *(float32x2_t *) &__reint1_187); \ + __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 1, 0); \ + __ret_187; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ + float32x4_t __ret_188; \ + float32x4_t __s0_188 = __p0_188; \ + float32x4_t __s1_188 = __p1_188; \ + float32x4_t __s2_188 = __p2_188; \ +float32x4_t __reint_188 = __s2_188; \ +uint64x2_t __reint1_188 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_188, __p3_188), vgetq_lane_u64(*(uint64x2_t *) &__reint_188, __p3_188)}; \ + __ret_188 = vcmlaq_f32(__s0_188, __s1_188, *(float32x4_t *) &__reint1_188); \ + __ret_188; \ +}) +#else +#define vcmlaq_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ + float32x4_t __ret_189; \ + float32x4_t __s0_189 = __p0_189; \ + float32x4_t __s1_189 = __p1_189; \ + float32x4_t __s2_189 = __p2_189; \ + float32x4_t __rev0_189; 
__rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \ + float32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \ + float32x4_t __rev2_189; __rev2_189 = __builtin_shufflevector(__s2_189, __s2_189, 3, 2, 1, 0); \ +float32x4_t __reint_189 = __rev2_189; \ +uint64x2_t __reint1_189 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ + __ret_189 = __noswap_vcmlaq_f32(__rev0_189, __rev1_189, *(float32x4_t *) &__reint1_189); \ + __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 3, 2, 1, 0); \ + __ret_189; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ + float32x2_t __ret_190; \ + float32x2_t __s0_190 = __p0_190; \ + float32x2_t __s1_190 = __p1_190; \ + float32x2_t __s2_190 = __p2_190; \ +float32x2_t __reint_190 = __s2_190; \ +uint64x1_t __reint1_190 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_190, __p3_190)}; \ + __ret_190 = vcmla_rot180_f32(__s0_190, __s1_190, *(float32x2_t *) &__reint1_190); \ + __ret_190; \ +}) 
+#else +#define vcmla_rot180_lane_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ + float32x2_t __ret_191; \ + float32x2_t __s0_191 = __p0_191; \ + float32x2_t __s1_191 = __p1_191; \ + float32x2_t __s2_191 = __p2_191; \ + float32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \ + float32x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \ + float32x2_t __rev2_191; __rev2_191 = __builtin_shufflevector(__s2_191, __s2_191, 1, 0); \ +float32x2_t __reint_191 = __rev2_191; \ +uint64x1_t __reint1_191 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_191, __p3_191)}; \ + __ret_191 = __noswap_vcmla_rot180_f32(__rev0_191, __rev1_191, *(float32x2_t *) &__reint1_191); \ + __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 1, 0); \ + __ret_191; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ + float32x4_t __ret_192; \ + float32x4_t __s0_192 = __p0_192; \ + float32x4_t __s1_192 = __p1_192; \ + float32x2_t __s2_192 = __p2_192; \ +float32x2_t __reint_192 = __s2_192; \ +uint64x2_t __reint1_192 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_192, __p3_192), vget_lane_u64(*(uint64x1_t *) &__reint_192, __p3_192)}; \ + __ret_192 = vcmlaq_rot180_f32(__s0_192, __s1_192, *(float32x4_t *) &__reint1_192); \ + __ret_192; \ +}) +#else +#define vcmlaq_rot180_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ + float32x4_t __ret_193; \ + float32x4_t __s0_193 = __p0_193; \ + float32x4_t __s1_193 = __p1_193; \ + float32x2_t __s2_193 = __p2_193; \ + float32x4_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 3, 2, 1, 0); \ + float32x4_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 3, 2, 1, 0); \ + float32x2_t __rev2_193; __rev2_193 = __builtin_shufflevector(__s2_193, __s2_193, 1, 0); \ +float32x2_t __reint_193 = __rev2_193; \ +uint64x2_t __reint1_193 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193), vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ + __ret_193 = __noswap_vcmlaq_rot180_f32(__rev0_193, __rev1_193, *(float32x4_t *) &__reint1_193); \ + __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 3, 2, 1, 0); \ + __ret_193; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ + float32x2_t __ret_194; \ + float32x2_t __s0_194 = __p0_194; \ + float32x2_t __s1_194 = __p1_194; \ + float32x4_t __s2_194 = __p2_194; \ +float32x4_t __reint_194 = __s2_194; \ +uint64x1_t __reint1_194 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_194, __p3_194)}; \ + __ret_194 = vcmla_rot180_f32(__s0_194, __s1_194, *(float32x2_t *) &__reint1_194); \ + __ret_194; \ +}) +#else +#define vcmla_rot180_laneq_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ + float32x2_t __ret_195; \ + float32x2_t __s0_195 = __p0_195; \ + float32x2_t __s1_195 = __p1_195; \ + float32x4_t __s2_195 = __p2_195; \ + float32x2_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 1, 0); \ + float32x2_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 1, 0); \ + float32x4_t __rev2_195; __rev2_195 = __builtin_shufflevector(__s2_195, __s2_195, 3, 2, 1, 0); \ +float32x4_t __reint_195 = __rev2_195; \ +uint64x1_t __reint1_195 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_195, __p3_195)}; \ + __ret_195 = __noswap_vcmla_rot180_f32(__rev0_195, 
__rev1_195, *(float32x2_t *) &__reint1_195); \ + __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 1, 0); \ + __ret_195; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ + float32x4_t __ret_196; \ + float32x4_t __s0_196 = __p0_196; \ + float32x4_t __s1_196 = __p1_196; \ + float32x4_t __s2_196 = __p2_196; \ +float32x4_t __reint_196 = __s2_196; \ +uint64x2_t __reint1_196 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_196, __p3_196), vgetq_lane_u64(*(uint64x2_t *) &__reint_196, __p3_196)}; \ + __ret_196 = vcmlaq_rot180_f32(__s0_196, __s1_196, *(float32x4_t *) &__reint1_196); \ + __ret_196; \ +}) +#else +#define vcmlaq_rot180_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ + float32x4_t __ret_197; \ + float32x4_t __s0_197 = __p0_197; \ + float32x4_t __s1_197 = __p1_197; \ + float32x4_t __s2_197 = __p2_197; \ + float32x4_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 3, 2, 1, 0); \ + float32x4_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 3, 2, 1, 0); \ + float32x4_t __rev2_197; __rev2_197 = __builtin_shufflevector(__s2_197, __s2_197, 3, 2, 1, 0); \ +float32x4_t __reint_197 = __rev2_197; \ +uint64x2_t __reint1_197 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ + __ret_197 = __noswap_vcmlaq_rot180_f32(__rev0_197, __rev1_197, *(float32x4_t *) &__reint1_197); \ + __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \ + __ret_197; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = 
(float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ + float32x2_t __ret_198; \ + float32x2_t __s0_198 = __p0_198; \ + float32x2_t __s1_198 = __p1_198; \ + float32x2_t __s2_198 = __p2_198; \ +float32x2_t __reint_198 = __s2_198; \ +uint64x1_t __reint1_198 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_198, __p3_198)}; \ + __ret_198 = vcmla_rot270_f32(__s0_198, __s1_198, *(float32x2_t *) &__reint1_198); \ + __ret_198; \ +}) +#else +#define vcmla_rot270_lane_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ + float32x2_t __ret_199; \ + float32x2_t __s0_199 = __p0_199; \ + float32x2_t __s1_199 = __p1_199; \ + float32x2_t __s2_199 = __p2_199; \ + float32x2_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 1, 0); \ + float32x2_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 1, 0); \ + float32x2_t __rev2_199; __rev2_199 = __builtin_shufflevector(__s2_199, __s2_199, 1, 0); \ +float32x2_t __reint_199 = __rev2_199; \ +uint64x1_t __reint1_199 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_199, __p3_199)}; \ + __ret_199 = __noswap_vcmla_rot270_f32(__rev0_199, __rev1_199, *(float32x2_t *) &__reint1_199); \ + __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 1, 0); \ + __ret_199; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ + float32x4_t __ret_200; \ + float32x4_t __s0_200 = __p0_200; \ + float32x4_t __s1_200 = __p1_200; \ + float32x2_t __s2_200 = __p2_200; \ +float32x2_t __reint_200 = __s2_200; \ +uint64x2_t __reint1_200 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_200, __p3_200), vget_lane_u64(*(uint64x1_t *) &__reint_200, __p3_200)}; \ + __ret_200 = vcmlaq_rot270_f32(__s0_200, __s1_200, *(float32x4_t *) &__reint1_200); \ + __ret_200; \ +}) +#else +#define vcmlaq_rot270_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ + float32x4_t __ret_201; \ + float32x4_t __s0_201 = __p0_201; \ + float32x4_t __s1_201 = __p1_201; \ + float32x2_t __s2_201 = __p2_201; \ + float32x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \ + float32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \ + float32x2_t __rev2_201; __rev2_201 = __builtin_shufflevector(__s2_201, __s2_201, 1, 0); \ +float32x2_t __reint_201 = __rev2_201; \ +uint64x2_t __reint1_201 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201), vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ + __ret_201 = __noswap_vcmlaq_rot270_f32(__rev0_201, __rev1_201, *(float32x4_t *) &__reint1_201); \ + __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 3, 2, 1, 0); \ + __ret_201; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ + float32x2_t __ret_202; \ + float32x2_t __s0_202 = __p0_202; \ + float32x2_t __s1_202 = __p1_202; \ + float32x4_t __s2_202 = __p2_202; \ 
+float32x4_t __reint_202 = __s2_202; \
+uint64x1_t __reint1_202 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_202, __p3_202)}; \
+ __ret_202 = vcmla_rot270_f32(__s0_202, __s1_202, *(float32x2_t *) &__reint1_202); \
+ __ret_202; \
+})
+#else
+#define vcmla_rot270_laneq_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
+ float32x2_t __ret_203; \
+ float32x2_t __s0_203 = __p0_203; \
+ float32x2_t __s1_203 = __p1_203; \
+ float32x4_t __s2_203 = __p2_203; \
+ float32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
+ float32x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
+ float32x4_t __rev2_203; __rev2_203 = __builtin_shufflevector(__s2_203, __s2_203, 3, 2, 1, 0); \
+float32x4_t __reint_203 = __rev2_203; \
+uint64x1_t __reint1_203 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_203, __p3_203)}; \
+ __ret_203 = __noswap_vcmla_rot270_f32(__rev0_203, __rev1_203, *(float32x2_t *) &__reint1_203); \
+ __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 1, 0); \
+ __ret_203; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot270_laneq_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
+ float32x4_t __ret_204; \
+ float32x4_t __s0_204 = __p0_204; \
+ float32x4_t __s1_204 = __p1_204; \
+ float32x4_t __s2_204 = __p2_204; \
+float32x4_t __reint_204 = __s2_204; \
+uint64x2_t __reint1_204 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_204, __p3_204), vgetq_lane_u64(*(uint64x2_t *) &__reint_204, __p3_204)}; \
+ __ret_204 = vcmlaq_rot270_f32(__s0_204, __s1_204, *(float32x4_t *) &__reint1_204); \
+ __ret_204; \
+})
+#else
+#define vcmlaq_rot270_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
+ float32x4_t __ret_205; \
+ float32x4_t __s0_205 = __p0_205; \
+ float32x4_t __s1_205 = __p1_205; \
+ float32x4_t __s2_205 = __p2_205; \
+ float32x4_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 3, 2, 1, 0); \
+ float32x4_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 3, 2, 1, 0); \
+ float32x4_t __rev2_205; __rev2_205 = __builtin_shufflevector(__s2_205, __s2_205, 3, 2, 1, 0); \
+float32x4_t __reint_205 = __rev2_205; \
+uint64x2_t __reint1_205 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \
+ __ret_205 = __noswap_vcmlaq_rot270_f32(__rev0_205, __rev1_205, *(float32x4_t *) &__reint1_205); \
+ __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 3, 2, 1, 0); \
+ __ret_205; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_lane_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
+ float32x2_t __ret_206; \
+ float32x2_t __s0_206 = __p0_206; \
+ float32x2_t __s1_206 = __p1_206; \
+ float32x2_t __s2_206 = __p2_206; \
+float32x2_t __reint_206 = __s2_206; \
+uint64x1_t __reint1_206 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_206, __p3_206)}; \
+ __ret_206 = vcmla_rot90_f32(__s0_206, __s1_206, *(float32x2_t *) &__reint1_206); \
+ __ret_206; \
+})
+#else
+#define vcmla_rot90_lane_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
+ float32x2_t __ret_207; \
+ float32x2_t __s0_207 = __p0_207; \
+ float32x2_t __s1_207 = __p1_207; \
+ float32x2_t __s2_207 = __p2_207; \
+ float32x2_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 1, 0); \
+ float32x2_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 1, 0); \
+ float32x2_t __rev2_207; __rev2_207 = __builtin_shufflevector(__s2_207, __s2_207, 1, 0); \
+float32x2_t __reint_207 = __rev2_207; \
+uint64x1_t __reint1_207 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_207, __p3_207)}; \
+ __ret_207 = __noswap_vcmla_rot90_f32(__rev0_207, __rev1_207, *(float32x2_t *) &__reint1_207); \
+ __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 1, 0); \
+ __ret_207; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
+ float32x4_t __ret_208; \
+ float32x4_t __s0_208 = __p0_208; \
+ float32x4_t __s1_208 = __p1_208; \
+ float32x2_t __s2_208 = __p2_208; \
+float32x2_t __reint_208 = __s2_208; \
+uint64x2_t __reint1_208 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_208, __p3_208), vget_lane_u64(*(uint64x1_t *) &__reint_208, __p3_208)}; \
+ __ret_208 = vcmlaq_rot90_f32(__s0_208, __s1_208, *(float32x4_t *) &__reint1_208); \
+ __ret_208; \
+})
+#else
+#define vcmlaq_rot90_lane_f32(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
+ float32x4_t __ret_209; \
+ float32x4_t __s0_209 = __p0_209; \
+ float32x4_t __s1_209 = __p1_209; \
+ float32x2_t __s2_209 = __p2_209; \
+ float32x4_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 3, 2, 1, 0); \
+ float32x4_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 3, 2, 1, 0); \
+ float32x2_t __rev2_209; __rev2_209 = __builtin_shufflevector(__s2_209, __s2_209, 1, 0); \
+float32x2_t __reint_209 = __rev2_209; \
+uint64x2_t __reint1_209 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_209, __p3_209), vget_lane_u64(*(uint64x1_t *) &__reint_209, __p3_209)}; \
+ __ret_209 = __noswap_vcmlaq_rot90_f32(__rev0_209, __rev1_209, *(float32x4_t *) &__reint1_209); \
+ __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
+ __ret_209; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f32(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
+ float32x2_t __ret_210; \
+ float32x2_t __s0_210 = __p0_210; \
+ float32x2_t __s1_210 = __p1_210; \
+ float32x4_t __s2_210 = __p2_210; \
+float32x4_t __reint_210 = __s2_210; \
+uint64x1_t __reint1_210 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_210, __p3_210)}; \
+ __ret_210 = vcmla_rot90_f32(__s0_210, __s1_210, *(float32x2_t *) &__reint1_210); \
+ __ret_210; \
+})
+#else
+#define vcmla_rot90_laneq_f32(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
+ float32x2_t __ret_211; \
+ float32x2_t __s0_211 = __p0_211; \
+ float32x2_t __s1_211 = __p1_211; \
+ float32x4_t __s2_211 = __p2_211; \
+ float32x2_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 1, 0); \
+ float32x2_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 1, 0); \
+ float32x4_t __rev2_211; __rev2_211 = __builtin_shufflevector(__s2_211, __s2_211, 3, 2, 1, 0); \
+float32x4_t __reint_211 = __rev2_211; \
+uint64x1_t __reint1_211 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_211, __p3_211)}; \
+ __ret_211 = __noswap_vcmla_rot90_f32(__rev0_211, __rev1_211, *(float32x2_t *) &__reint1_211); \
+ __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 1, 0); \
+ __ret_211; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f32(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
+ float32x4_t __ret_212; \
+ float32x4_t __s0_212 = __p0_212; \
+ float32x4_t __s1_212 = __p1_212; \
+ float32x4_t __s2_212 = __p2_212; \
+float32x4_t __reint_212 = __s2_212; \
+uint64x2_t __reint1_212 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_212, __p3_212), vgetq_lane_u64(*(uint64x2_t *) &__reint_212, __p3_212)}; \
+ __ret_212 = vcmlaq_rot90_f32(__s0_212, __s1_212, *(float32x4_t *) &__reint1_212); \
+ __ret_212; \
+})
+#else
+#define vcmlaq_rot90_laneq_f32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
+ float32x4_t __ret_213; \
+ float32x4_t __s0_213 = __p0_213; \
+ float32x4_t __s1_213 = __p1_213; \
+ float32x4_t __s2_213 = __p2_213; \
+ float32x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
+ float32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
+ float32x4_t __rev2_213; __rev2_213 = __builtin_shufflevector(__s2_213, __s2_213, 3, 2, 1, 0); \
+float32x4_t __reint_213 = __rev2_213; \
+uint64x2_t __reint1_213 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_213, __p3_213), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_213, __p3_213)}; \
+ __ret_213 = __noswap_vcmlaq_rot90_f32(__rev0_213, __rev1_213, *(float32x4_t *) &__reint1_213); \
+ __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 3, 2, 1, 0); \
+ __ret_213; \
+})
+#endif
+
+#if !defined(__aarch64__) && !defined(__arm64ec__)
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11);
  return __ret;
 }
 #else
-__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
  bfloat16x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__rev0, 11);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11);
  return __ret;
@@ -38981,13 +37144,13 @@ __ai __attribute__((target("bf16"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(f
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = __a32_vcvt_bf16_f32(__p0);
  return __ret;
 }
 #else
-__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
  bfloat16x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
@@ -38997,13 +37160,13 @@ __ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
  bfloat16x8_t __ret;
  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
  return __ret;
 }
 #else
-__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
  bfloat16x8_t __ret;
  bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -39014,13 +37177,13 @@ __ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
  bfloat16x8_t __ret;
  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
  return __ret;
 }
 #else
-__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
  bfloat16x8_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
__attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t } #endif -__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { 
int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { bfloat16x8_t __ret; __ret = (bfloat16x8_t)(__p0); return 
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
  bfloat16x8_t __ret;
  __ret = (bfloat16x8_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
-__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
  bfloat16x4_t __ret;
  __ret = (bfloat16x4_t)(__p0);
  return __ret;
 }
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s32(__p0_214, __p1_214, __p2_214) __extension__ ({ \
+ int32x4_t __ret_214; \
+ int32x4_t __s0_214 = __p0_214; \
+ int32x2_t __s1_214 = __p1_214; \
+ __ret_214 = vqdmulhq_s32(__s0_214, splatq_lane_s32(__s1_214, __p2_214)); \
+ __ret_214; \
+})
+#else
+#define vqdmulhq_lane_s32(__p0_215, __p1_215, __p2_215) __extension__ ({ \
+ int32x4_t __ret_215; \
+ int32x4_t __s0_215 = __p0_215; \
+ int32x2_t __s1_215 = __p1_215; \
+ int32x4_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 3, 2, 1, 0); \
+ int32x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
+ __ret_215 = __noswap_vqdmulhq_s32(__rev0_215, __noswap_splatq_lane_s32(__rev1_215, __p2_215)); \
+ __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
+ __ret_215; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
+ int16x8_t __ret_216; \
+ int16x8_t __s0_216 = __p0_216; \
+ int16x4_t __s1_216 = __p1_216; \
+ __ret_216 = vqdmulhq_s16(__s0_216, splatq_lane_s16(__s1_216, __p2_216)); \
+ __ret_216; \
+})
+#else
+#define vqdmulhq_lane_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
+ int16x8_t __ret_217; \
+ int16x8_t __s0_217 = __p0_217; \
+ int16x4_t __s1_217 = __p1_217; \
+ int16x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 3, 2, 1, 0); \
+ __ret_217 = __noswap_vqdmulhq_s16(__rev0_217, __noswap_splatq_lane_s16(__rev1_217, __p2_217)); \
+ __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_217; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
+ int32x2_t __ret_218; \
+ int32x2_t __s0_218 = __p0_218; \
+ int32x2_t __s1_218 = __p1_218; \
+ __ret_218 = vqdmulh_s32(__s0_218, splat_lane_s32(__s1_218, __p2_218)); \
+ __ret_218; \
+})
+#else
+#define vqdmulh_lane_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
+ int32x2_t __ret_219; \
+ int32x2_t __s0_219 = __p0_219; \
+ int32x2_t __s1_219 = __p1_219; \
+ int32x2_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 1, 0); \
+ int32x2_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 1, 0); \
+ __ret_219 = __noswap_vqdmulh_s32(__rev0_219, __noswap_splat_lane_s32(__rev1_219, __p2_219)); \
+ __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 1, 0); \
+ __ret_219; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \
+ int16x4_t __ret_220; \
+ int16x4_t __s0_220 = __p0_220; \
+ int16x4_t __s1_220 = __p1_220; \
+ __ret_220 = vqdmulh_s16(__s0_220, splat_lane_s16(__s1_220, __p2_220)); \
+ __ret_220; \
+})
+#else
+#define vqdmulh_lane_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \
+ int16x4_t __ret_221; \
+ int16x4_t __s0_221 = __p0_221; \
+ int16x4_t __s1_221 = __p1_221; \
+ int16x4_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 3, 2, 1, 0); \
+ int16x4_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 3, 2, 1, 0); \
+ __ret_221 = __noswap_vqdmulh_s16(__rev0_221, __noswap_splat_lane_s16(__rev1_221, __p2_221)); \
+ __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
+ __ret_221; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s32(__p0_222, __p1_222, __p2_222) __extension__ ({ \
+ int32x4_t __ret_222; \
+ int32x4_t __s0_222 = __p0_222; \
+ int32x2_t __s1_222 = __p1_222; \
+ __ret_222 = vqrdmulhq_s32(__s0_222, splatq_lane_s32(__s1_222, __p2_222)); \
+ __ret_222; \
+})
+#else
+#define vqrdmulhq_lane_s32(__p0_223, __p1_223, __p2_223) __extension__ ({ \
+ int32x4_t __ret_223; \
+ int32x4_t __s0_223 = __p0_223; \
+ int32x2_t __s1_223 = __p1_223; \
+ int32x4_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 3, 2, 1, 0); \
+ int32x2_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 1, 0); \
+ __ret_223 = __noswap_vqrdmulhq_s32(__rev0_223, __noswap_splatq_lane_s32(__rev1_223, __p2_223)); \
+ __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 3, 2, 1, 0); \
+ __ret_223; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s16(__p0_224, __p1_224, __p2_224) __extension__ ({ \
+ int16x8_t __ret_224; \
+ int16x8_t __s0_224 = __p0_224; \
+ int16x4_t __s1_224 = __p1_224; \
+ __ret_224 = vqrdmulhq_s16(__s0_224, splatq_lane_s16(__s1_224, __p2_224)); \
+ __ret_224; \
+})
+#else
+#define vqrdmulhq_lane_s16(__p0_225, __p1_225, __p2_225) __extension__ ({ \
+ int16x8_t __ret_225; \
+ int16x8_t __s0_225 = __p0_225; \
+ int16x4_t __s1_225 = __p1_225; \
+ int16x8_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 3, 2, 1, 0); \
+ __ret_225 = __noswap_vqrdmulhq_s16(__rev0_225, __noswap_splatq_lane_s16(__rev1_225, __p2_225)); \
+ __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_225; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s32(__p0_226, __p1_226, __p2_226) __extension__ ({ \
+ int32x2_t __ret_226; \
+ int32x2_t __s0_226 = __p0_226; \
+ int32x2_t __s1_226 = __p1_226; \
+ __ret_226 = vqrdmulh_s32(__s0_226, splat_lane_s32(__s1_226, __p2_226)); \
+ __ret_226; \
+})
+#else
+#define vqrdmulh_lane_s32(__p0_227, __p1_227, __p2_227) __extension__ ({ \
+ int32x2_t __ret_227; \
+ int32x2_t __s0_227 = __p0_227; \
+ int32x2_t __s1_227 = __p1_227; \
+ int32x2_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 1, 0); \
+ int32x2_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 1, 0); \
+ __ret_227 = __noswap_vqrdmulh_s32(__rev0_227, __noswap_splat_lane_s32(__rev1_227, __p2_227)); \
+ __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \
+ __ret_227; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s16(__p0_228, __p1_228, __p2_228) __extension__ ({ \
+ int16x4_t __ret_228; \
+ int16x4_t __s0_228 = __p0_228; \
+ int16x4_t __s1_228 = __p1_228; \
+ __ret_228 = vqrdmulh_s16(__s0_228, splat_lane_s16(__s1_228, __p2_228)); \
+ __ret_228; \
+})
+#else
+#define vqrdmulh_lane_s16(__p0_229, __p1_229, __p2_229) __extension__ ({ \
+ int16x4_t __ret_229; \
+ int16x4_t __s0_229 = __p0_229; \
+ int16x4_t __s1_229 = __p1_229; \
+ int16x4_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 3, 2, 1, 0); \
+ int16x4_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 3, 2, 1, 0); \
+ __ret_229 = __noswap_vqrdmulh_s16(__rev0_229, __noswap_splat_lane_s16(__rev1_229, __p2_229)); \
+ __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \
+ __ret_229; \
+})
+#endif
+
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
+ uint16x4_t __ret;
(uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = 
(float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} 
+__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} #endif #if (__ARM_FP & 2) #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); return __ret; @@ -39313,20 +38964,20 @@ __ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) { float32x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); return __ret; @@ -40237,272 +39888,211 @@ __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { }) #endif +#endif +#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) 
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#endif
+#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrnd_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrnda_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrndi_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrndm_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrndn_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrndp_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) {
+  float64x2_t __ret;
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai __attribute__((target("neon"))) float64x1_t vrndx_f64(float64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
+  return __ret;
+}
 #endif
 #if __ARM_ARCH >= 8
 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
 }
 #else
-__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -40513,13 +40103,13 @@ __ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
 }
 #else
-__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -40530,13 +40120,13 @@ __ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__p0, 48);
   return __ret;
 }
 #else
-__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__rev0, 48);
@@ -40546,13 +40136,13 @@ __ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__p0, 48);
   return __ret;
 }
 #else
-__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
+__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__rev0, 48);
@@ -40562,13 +40152,269 @@ __ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
@@ -40578,19 +40424,19 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uin
 }
 #endif
-__ai __attribute__((target("sha2"))) uint32_t vsha1h_u32(uint32_t __p0) {
+__ai __attribute__((target("sha2,neon"))) uint32_t vsha1h_u32(uint32_t __p0) {
   uint32_t __ret;
   __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
@@ -40601,13 +40447,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uin
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
@@ -40618,13 +40464,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uin
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -40636,13 +40482,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, u
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -40653,13 +40499,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, u
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -40671,13 +40517,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, u
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -40689,13 +40535,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0,
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -40706,13 +40552,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0,
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
   return __ret;
 }
 #else
-__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
   uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -40726,242 +40572,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0,
 #endif
 #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
 #ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float32_t vrndns_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrndns_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) {
   float16x8_t __ret;
   __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__p0, 40);
   return __ret;
 }
 #else
-__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) {
   float16x8_t __ret;
   float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__rev0, 40);
@@ -40971,13 +40588,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0)
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) {
   float16x4_t __ret;
   __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__p0, 8);
   return __ret;
 }
 #else
-__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) {
   float16x4_t __ret;
   float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__rev0, 8);
@@ -40987,13 +40604,13 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0)
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) {
   float16x8_t __ret;
   __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__p0, 40);
   return __ret;
 }
 #else
-__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) {
   float16x8_t __ret;
   float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__rev0, 40);
@@ -41003,13 +40620,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) {
   float16x4_t __ret;
   __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__p0, 8);
   return __ret;
 }
 #else
-__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) {
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) {
   float16x4_t __ret;
   float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__rev0, 8);
@@ -41019,13 +40636,13 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0)
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0)
{ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__rev0, 40); @@ -41035,13 +40652,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__rev0, 8); @@ -41051,13 +40668,13 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__rev0, 40); @@ -41067,13 +40684,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__rev0, 8); @@ -41083,13 +40700,13 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 
3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__rev0, 40); @@ -41099,13 +40716,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__rev0, 8); @@ -41115,13 +40732,13 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__rev0, 40); @@ -41131,13 +40748,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__rev0, 8); @@ -41146,84 +40763,245 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) } #endif +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + 
__ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float32_t vrndns_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrndns_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - 
float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -41234,13 +41012,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai 
__attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41251,13 +41029,13 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -41268,13 +41046,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41284,16 +41062,84 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0 } #endif +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t 
__p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + #endif #if defined(__ARM_FEATURE_FMA) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41302,7 +41148,7 @@ __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; @@ -41310,13 +41156,13 @@ __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else 
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41325,7 +41171,7 @@ __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; @@ -41333,13 +41179,13 @@ __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2 #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41350,13 +41196,13 @@ __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); return __ret; } #else -__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41367,13 +41213,13 @@ __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, -__p1, __p2); return __ret; } #else -__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41385,13 +41231,13 @@ __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) 
float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, -__p1, __p2); return __ret; } #else -__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41403,15 +41249,1881 @@ __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) #endif #endif -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) +__ai __attribute__((target("aes,neon"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_bf16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ + bfloat16x8_t __ret_230; \ + bfloat16x8_t __s0_230 = __p0_230; \ + bfloat16x4_t __s2_230 = __p2_230; \ + __ret_230 = vsetq_lane_bf16(vget_lane_bf16(__s2_230, __p3_230), __s0_230, __p1_230); \ + __ret_230; \ +}) +#else +#define vcopyq_lane_bf16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ + bfloat16x8_t __ret_231; \ + bfloat16x8_t __s0_231 = __p0_231; \ + bfloat16x4_t __s2_231 = __p2_231; \ + bfloat16x8_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_231; __rev2_231 = __builtin_shufflevector(__s2_231, __s2_231, 3, 2, 1, 0); \ + __ret_231 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_231, __p3_231), __rev0_231, __p1_231); \ + __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_231; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_bf16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ + bfloat16x4_t __ret_232; \ + bfloat16x4_t __s0_232 = __p0_232; \ + bfloat16x4_t __s2_232 = __p2_232; \ + __ret_232 = vset_lane_bf16(vget_lane_bf16(__s2_232, __p3_232), __s0_232, __p1_232); \ + __ret_232; \ +}) +#else +#define vcopy_lane_bf16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ + bfloat16x4_t __ret_233; \ + bfloat16x4_t __s0_233 = __p0_233; \ + bfloat16x4_t __s2_233 = __p2_233; \ + bfloat16x4_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_233; __rev2_233 = __builtin_shufflevector(__s2_233, __s2_233, 3, 2, 1, 0); \ + __ret_233 = 
__noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_233, __p3_233), __rev0_233, __p1_233); \ + __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 3, 2, 1, 0); \ + __ret_233; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_bf16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ + bfloat16x8_t __ret_234; \ + bfloat16x8_t __s0_234 = __p0_234; \ + bfloat16x8_t __s2_234 = __p2_234; \ + __ret_234 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_234, __p3_234), __s0_234, __p1_234); \ + __ret_234; \ +}) +#else +#define vcopyq_laneq_bf16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ + bfloat16x8_t __ret_235; \ + bfloat16x8_t __s0_235 = __p0_235; \ + bfloat16x8_t __s2_235 = __p2_235; \ + bfloat16x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_235; __rev2_235 = __builtin_shufflevector(__s2_235, __s2_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_235, __p3_235), __rev0_235, __p1_235); \ + __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_bf16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ + bfloat16x4_t __ret_236; \ + bfloat16x4_t __s0_236 = __p0_236; \ + bfloat16x8_t __s2_236 = __p2_236; \ + __ret_236 = vset_lane_bf16(vgetq_lane_bf16(__s2_236, __p3_236), __s0_236, __p1_236); \ + __ret_236; \ +}) +#else +#define vcopy_laneq_bf16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ + bfloat16x4_t __ret_237; \ + bfloat16x4_t __s0_237 = __p0_237; \ + bfloat16x8_t __s2_237 = __p2_237; \ + bfloat16x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_237 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_237, __p3_237), __rev0_237, __p1_237); \ + __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 3, 2, 1, 0); \ + __ret_237; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = __a64_vcvtq_low_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_u32(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ + uint32x4_t __ret_238; \ + uint32x4_t __s0_238 = __p0_238; \ + uint8x16_t __s1_238 = __p1_238; \ + uint8x16_t __s2_238 = __p2_238; \ +uint8x16_t __reint_238 = __s2_238; \ +uint32x4_t __reint1_238 = 
splatq_laneq_u32(*(uint32x4_t *) &__reint_238, __p3_238); \ + __ret_238 = vdotq_u32(__s0_238, __s1_238, *(uint8x16_t *) &__reint1_238); \ + __ret_238; \ +}) +#else +#define vdotq_laneq_u32(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ + uint32x4_t __ret_239; \ + uint32x4_t __s0_239 = __p0_239; \ + uint8x16_t __s1_239 = __p1_239; \ + uint8x16_t __s2_239 = __p2_239; \ + uint32x4_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 3, 2, 1, 0); \ + uint8x16_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_239; __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_239 = __rev2_239; \ +uint32x4_t __reint1_239 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_239, __p3_239); \ + __ret_239 = __noswap_vdotq_u32(__rev0_239, __rev1_239, *(uint8x16_t *) &__reint1_239); \ + __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \ + __ret_239; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_s32(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ + int32x4_t __ret_240; \ + int32x4_t __s0_240 = __p0_240; \ + int8x16_t __s1_240 = __p1_240; \ + int8x16_t __s2_240 = __p2_240; \ +int8x16_t __reint_240 = __s2_240; \ +int32x4_t __reint1_240 = splatq_laneq_s32(*(int32x4_t *) &__reint_240, __p3_240); \ + __ret_240 = vdotq_s32(__s0_240, __s1_240, *(int8x16_t *) &__reint1_240); \ + __ret_240; \ +}) +#else +#define vdotq_laneq_s32(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \ + int32x4_t __ret_241; \ + int32x4_t __s0_241 = __p0_241; \ + int8x16_t __s1_241 = __p1_241; \ + int8x16_t __s2_241 = __p2_241; \ + int32x4_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 3, 2, 1, 0); \ + int8x16_t __rev1_241; __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_241; __rev2_241 = __builtin_shufflevector(__s2_241, __s2_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_241 = __rev2_241; \ +int32x4_t __reint1_241 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_241, __p3_241); \ + __ret_241 = __noswap_vdotq_s32(__rev0_241, __rev1_241, *(int8x16_t *) &__reint1_241); \ + __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 3, 2, 1, 0); \ + __ret_241; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_u32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \ + uint32x2_t __ret_242; \ + uint32x2_t __s0_242 = __p0_242; \ + uint8x8_t __s1_242 = __p1_242; \ + uint8x16_t __s2_242 = __p2_242; \ +uint8x16_t __reint_242 = __s2_242; \ +uint32x2_t __reint1_242 = splat_laneq_u32(*(uint32x4_t *) &__reint_242, __p3_242); \ + __ret_242 = vdot_u32(__s0_242, __s1_242, *(uint8x8_t *) &__reint1_242); \ + __ret_242; \ +}) +#else +#define vdot_laneq_u32(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \ + uint32x2_t __ret_243; \ + uint32x2_t __s0_243 = __p0_243; \ + uint8x8_t __s1_243 = __p1_243; \ + uint8x16_t __s2_243 = __p2_243; \ + uint32x2_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 1, 0); \ + uint8x8_t __rev1_243; __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_243; __rev2_243 = __builtin_shufflevector(__s2_243, __s2_243, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_243 = __rev2_243; \ +uint32x2_t __reint1_243 = 
__noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_243, __p3_243); \ + __ret_243 = __noswap_vdot_u32(__rev0_243, __rev1_243, *(uint8x8_t *) &__reint1_243); \ + __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 1, 0); \ + __ret_243; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_s32(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \ + int32x2_t __ret_244; \ + int32x2_t __s0_244 = __p0_244; \ + int8x8_t __s1_244 = __p1_244; \ + int8x16_t __s2_244 = __p2_244; \ +int8x16_t __reint_244 = __s2_244; \ +int32x2_t __reint1_244 = splat_laneq_s32(*(int32x4_t *) &__reint_244, __p3_244); \ + __ret_244 = vdot_s32(__s0_244, __s1_244, *(int8x8_t *) &__reint1_244); \ + __ret_244; \ +}) +#else +#define vdot_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \ + int32x2_t __ret_245; \ + int32x2_t __s0_245 = __p0_245; \ + int8x8_t __s1_245 = __p1_245; \ + int8x16_t __s2_245 = __p2_245; \ + int32x2_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \ + int8x8_t __rev1_245; __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_245; __rev2_245 = __builtin_shufflevector(__s2_245, __s2_245, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_245 = __rev2_245; \ +int32x2_t __reint1_245 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_245, __p3_245); \ + __ret_245 = __noswap_vdot_s32(__rev0_245, __rev1_245, *(int8x8_t *) &__reint1_245); \ + __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 1, 0); \ + __ret_245; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) 
__builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = 
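The vfmlal*/vfmlsl* definitions in this stretch wrap the FP16 FML extension: half-precision sources are widened to single precision and multiply-accumulated (or multiply-subtracted) with one rounding, with _low/_high choosing which half of the f16 vector feeds the f32 lanes. A scalar sketch of vfmlalq_high_f16, assuming a compiler that provides _Float16; the helper name is illustrative:

#include <math.h>

/* Reference model for vfmlalq_high_f16: widen lanes 4..7 of both
   half-precision sources to float and fuse-accumulate into acc. */
static inline void ref_vfmlalq_high_f16(float acc[4], const _Float16 a[8],
                                        const _Float16 b[8]) {
    for (int i = 0; i < 4; ++i)
        acc[i] = fmaf((float)a[4 + i], (float)b[4 + i], acc[i]);
}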
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t 
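By now the header's generation pattern has repeated enough to be worth naming: every intrinsic gets a little-endian branch that calls the builtin directly, and a big-endian branch that reverses each operand's lanes, calls the builtin (which assumes little-endian lane numbering), and reverses the result back. The extra __noswap_ definitions in the big-endian branch exist so that composite wrappers, whose operands are already reversed, can call a sibling without swapping twice. The reversal step in isolation, sketched with a generic clang vector type so it needs no NEON target:

/* What each "__rev" temporary does: the same vector with its lanes
   reversed, e.g. {v[3], v[2], v[1], v[0]} for a 4-lane vector. */
typedef float f32x4 __attribute__((ext_vector_type(4)));

static inline f32x4 demo_reverse(f32x4 v) {
    return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}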
__rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, 
__s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ 
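The _n forms just above are sugar over the full-vector intrinsics: the scalar is broadcast with a vector compound literal and handed to vfmaq_f16/vfma_f16 (or, on big endian, to the __noswap_ variant after the vector operands are reversed). A user-level equivalent, assuming an AArch64 target with fullfp16; the function name is illustrative:

#include <arm_neon.h>

__attribute__((target("fullfp16,neon")))
float16x8_t demo_fmaq_n(float16x8_t acc, float16x8_t a, float16_t s) {
    /* Same result as vfmaq_n_f16(acc, a, s): broadcast s, then fuse. */
    return vfmaq_f16(acc, a, vdupq_n_f16(s));
}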
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_lane_f16(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \ + float16_t __ret_246; \ + float16_t __s0_246 = __p0_246; \ + float16_t __s1_246 = __p1_246; \ + float16x4_t __s2_246 = __p2_246; \ + __ret_246 = vfmah_lane_f16(__s0_246, -__s1_246, __s2_246, __p3_246); \ + __ret_246; \ +}) +#else +#define vfmsh_lane_f16(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \ + float16_t __ret_247; \ + float16_t __s0_247 = __p0_247; \ + float16_t __s1_247 = __p1_247; \ + float16x4_t __s2_247 = __p2_247; \ + float16x4_t __rev2_247; __rev2_247 = __builtin_shufflevector(__s2_247, __s2_247, 3, 2, 1, 0); \ + __ret_247 = __noswap_vfmah_lane_f16(__s0_247, -__s1_247, __rev2_247, __p3_247); \ + __ret_247; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f16(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \ + float16x8_t __ret_248; \ + float16x8_t __s0_248 = __p0_248; \ + float16x8_t __s1_248 = __p1_248; \ + float16x4_t __s2_248 = __p2_248; \ + __ret_248 = vfmaq_lane_f16(__s0_248, -__s1_248, __s2_248, __p3_248); \ + __ret_248; \ +}) +#else +#define vfmsq_lane_f16(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \ + float16x8_t __ret_249; \ + float16x8_t __s0_249 = __p0_249; \ + float16x8_t __s1_249 = __p1_249; \ + float16x4_t __s2_249 = __p2_249; \ + float16x8_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_249; __rev2_249 = __builtin_shufflevector(__s2_249, __s2_249, 3, 2, 1, 0); \ + __ret_249 = __noswap_vfmaq_lane_f16(__rev0_249, -__rev1_249, __rev2_249, __p3_249); \ + __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_249; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f16(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ + float16x4_t __ret_250; \ + float16x4_t __s0_250 = __p0_250; \ + float16x4_t __s1_250 = __p1_250; \ + float16x4_t __s2_250 = __p2_250; \ + __ret_250 = vfma_lane_f16(__s0_250, -__s1_250, __s2_250, __p3_250); \ + __ret_250; \ +}) +#else +#define vfms_lane_f16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ + float16x4_t __ret_251; \ + float16x4_t __s0_251 = __p0_251; \ + float16x4_t __s1_251 = __p1_251; \ + float16x4_t __s2_251 = __p2_251; \ + float16x4_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 3, 2, 1, 0); \ + float16x4_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 3, 2, 1, 0); \ + float16x4_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 3, 2, 1, 0); \ + __ret_251 = __noswap_vfma_lane_f16(__rev0_251, -__rev1_251, __rev2_251, __p3_251); \ + __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 3, 2, 1, 0); \ + __ret_251; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_laneq_f16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ + float16_t __ret_252; \ + float16_t __s0_252 = __p0_252; \ + float16_t __s1_252 = __p1_252; \ + float16x8_t __s2_252 = __p2_252; \ + __ret_252 = vfmah_laneq_f16(__s0_252, -__s1_252, __s2_252, __p3_252); \ + __ret_252; \ +}) +#else +#define vfmsh_laneq_f16(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ + float16_t __ret_253; \ + float16_t __s0_253 = __p0_253; \ + float16_t __s1_253 = __p1_253; \ + float16x8_t __s2_253 = __p2_253; \ + 
float16x8_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_253 = __noswap_vfmah_laneq_f16(__s0_253, -__s1_253, __rev2_253, __p3_253); \ + __ret_253; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f16(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \ + float16x8_t __ret_254; \ + float16x8_t __s0_254 = __p0_254; \ + float16x8_t __s1_254 = __p1_254; \ + float16x8_t __s2_254 = __p2_254; \ + __ret_254 = vfmaq_laneq_f16(__s0_254, -__s1_254, __s2_254, __p3_254); \ + __ret_254; \ +}) +#else +#define vfmsq_laneq_f16(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \ + float16x8_t __ret_255; \ + float16x8_t __s0_255 = __p0_255; \ + float16x8_t __s1_255 = __p1_255; \ + float16x8_t __s2_255 = __p2_255; \ + float16x8_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_255; __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_255; __rev2_255 = __builtin_shufflevector(__s2_255, __s2_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_255 = __noswap_vfmaq_laneq_f16(__rev0_255, -__rev1_255, __rev2_255, __p3_255); \ + __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_255; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f16(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \ + float16x4_t __ret_256; \ + float16x4_t __s0_256 = __p0_256; \ + float16x4_t __s1_256 = __p1_256; \ + float16x8_t __s2_256 = __p2_256; \ + __ret_256 = vfma_laneq_f16(__s0_256, -__s1_256, __s2_256, __p3_256); \ + __ret_256; \ +}) +#else +#define vfms_laneq_f16(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ + float16x4_t __ret_257; \ + float16x4_t __s0_257 = __p0_257; \ + float16x4_t __s1_257 = __p1_257; \ + float16x8_t __s2_257 = __p2_257; \ + float16x4_t __rev0_257; __rev0_257 = __builtin_shufflevector(__s0_257, __s0_257, 3, 2, 1, 0); \ + float16x4_t __rev1_257; __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 3, 2, 1, 0); \ + float16x8_t __rev2_257; __rev2_257 = __builtin_shufflevector(__s2_257, __s2_257, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_257 = __noswap_vfma_laneq_f16(__rev0_257, -__rev1_257, __rev2_257, __p3_257); \ + __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 3, 2, 1, 0); \ + __ret_257; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) 
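All of the vfms* wrappers in this block reduce fused multiply-subtract to fused multiply-add with a negated multiplicand, so vfms_lane_f16(acc, a, b, k) computes acc - a*b[k] with the same single rounding as the fma form. The identity in scalar C, with a made-up helper name:

#include <math.h>

/* acc - a*b in one rounding, exactly how the macros above negate __s1. */
static inline float ref_fms(float acc, float a, float b) {
    return fmaf(-a, b, acc);
}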
+#else +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + 
float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f16(__p0_258, __p1_258, __p2_258) __extension__ ({ \ + float16x8_t __ret_258; \ + float16x8_t __s0_258 = __p0_258; \ + float16x8_t __s1_258 = __p1_258; \ + __ret_258 = __s0_258 * splatq_laneq_f16(__s1_258, __p2_258); \ + __ret_258; \ +}) +#else +#define vmulq_laneq_f16(__p0_259, __p1_259, __p2_259) __extension__ ({ \ + float16x8_t __ret_259; \ + float16x8_t __s0_259 = __p0_259; \ + float16x8_t __s1_259 = __p1_259; \ + float16x8_t __rev0_259; __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_259; __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_259 = __rev0_259 * __noswap_splatq_laneq_f16(__rev1_259, __p2_259); \ + __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_259; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f16(__p0_260, __p1_260, __p2_260) __extension__ ({ \ + float16x4_t __ret_260; \ + float16x4_t __s0_260 = __p0_260; \ + float16x8_t __s1_260 = __p1_260; \ + __ret_260 = __s0_260 * splat_laneq_f16(__s1_260, __p2_260); \ + __ret_260; \ +}) +#else +#define vmul_laneq_f16(__p0_261, __p1_261, __p2_261) __extension__ ({ \ + float16x4_t __ret_261; \ + float16x4_t __s0_261 = __p0_261; \ + float16x8_t __s1_261 = __p1_261; \ + float16x4_t __rev0_261; __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 3, 2, 1, 0); \ + float16x8_t __rev1_261; __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_261 = __rev0_261 * __noswap_splat_laneq_f16(__rev1_261, __p2_261); \ + __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 3, 2, 1, 0); \ + __ret_261; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) 
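The vmaxnmv*/vmaxv*/vminnmv*/vminv* macros above are across-lane reductions to a scalar. The nm variants follow IEEE-754 maxNum/minNum semantics, preferring a number over a quiet NaN, while the plain forms propagate NaNs; note the big-endian branches still reverse the input even though the result is order-independent, keeping the generator uniform. A scalar sketch of the NaN-ignoring maximum, shown in float for brevity since fmaxf has the same quiet-NaN behaviour as FMAXNM:

#include <math.h>

/* Reference model for a vmaxnmv-style reduction over four lanes. */
static inline float ref_maxnmv(const float v[4]) {
    float m = v[0];
    for (int i = 1; i < 4; ++i)
        m = fmaxf(m, v[i]);    /* a quiet NaN loses to any number */
    return m;
}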
__builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f16(__p0_262, __p1_262, __p2_262) __extension__ ({ \ + float16x8_t __ret_262; \ + float16x8_t __s0_262 = __p0_262; \ + float16x4_t __s1_262 = __p1_262; \ + __ret_262 = vmulxq_f16(__s0_262, splatq_lane_f16(__s1_262, __p2_262)); \ + __ret_262; \ +}) +#else +#define vmulxq_lane_f16(__p0_263, __p1_263, __p2_263) __extension__ ({ \ + float16x8_t __ret_263; \ + float16x8_t __s0_263 = __p0_263; \ + float16x4_t __s1_263 = __p1_263; \ + float16x8_t __rev0_263; __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_263; __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 3, 2, 1, 0); \ + __ret_263 = __noswap_vmulxq_f16(__rev0_263, __noswap_splatq_lane_f16(__rev1_263, __p2_263)); \ + __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_263; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f16(__p0_264, __p1_264, __p2_264) __extension__ ({ \ + float16x4_t __ret_264; \ + float16x4_t __s0_264 = __p0_264; \ + float16x4_t __s1_264 = __p1_264; \ + __ret_264 = vmulx_f16(__s0_264, splat_lane_f16(__s1_264, __p2_264)); \ + __ret_264; \ +}) +#else +#define vmulx_lane_f16(__p0_265, __p1_265, __p2_265) __extension__ ({ \ + float16x4_t __ret_265; \ + float16x4_t __s0_265 = __p0_265; \ + float16x4_t __s1_265 = __p1_265; \ + float16x4_t __rev0_265; __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 3, 2, 1, 0); \ + float16x4_t __rev1_265; __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \ + __ret_265 = __noswap_vmulx_f16(__rev0_265, __noswap_splat_lane_f16(__rev1_265, __p2_265)); \ + __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 3, 2, 1, 0); \ + __ret_265; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + 
float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f16(__p0_266, __p1_266, __p2_266) __extension__ ({ \ + float16x8_t __ret_266; \ + float16x8_t __s0_266 = __p0_266; \ + float16x8_t __s1_266 = __p1_266; \ + __ret_266 = vmulxq_f16(__s0_266, splatq_laneq_f16(__s1_266, __p2_266)); \ + __ret_266; \ +}) +#else +#define vmulxq_laneq_f16(__p0_267, __p1_267, __p2_267) __extension__ ({ \ + float16x8_t __ret_267; \ + float16x8_t __s0_267 = __p0_267; \ + float16x8_t __s1_267 = __p1_267; \ + float16x8_t __rev0_267; __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_267; __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_267 = __noswap_vmulxq_f16(__rev0_267, __noswap_splatq_laneq_f16(__rev1_267, __p2_267)); \ + __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_267; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f16(__p0_268, __p1_268, __p2_268) __extension__ ({ \ + float16x4_t __ret_268; \ + float16x4_t __s0_268 = __p0_268; \ + float16x8_t __s1_268 = __p1_268; \ + __ret_268 = vmulx_f16(__s0_268, splat_laneq_f16(__s1_268, __p2_268)); \ + __ret_268; \ +}) +#else +#define vmulx_laneq_f16(__p0_269, __p1_269, __p2_269) __extension__ ({ \ + float16x4_t __ret_269; \ + float16x4_t __s0_269 = __p0_269; \ + float16x8_t __s1_269 = __p1_269; \ + float16x4_t __rev0_269; __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 3, 2, 1, 0); \ + float16x8_t __rev1_269; __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_269 = __noswap_vmulx_f16(__rev0_269, __noswap_splat_laneq_f16(__rev1_269, __p2_269)); \ + __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 3, 2, 1, 0); \ + __ret_269; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); 
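vmulx* wraps FMULX, which behaves like an ordinary multiply except in the one case a plain multiply turns into NaN: (+/-0) times (+/-Inf) yields +/-2.0, with the sign taken as the XOR of the operand signs, which is the convention the Newton-Raphson reciprocal sequences rely on. A scalar sketch; the helper name is made up:

#include <math.h>

/* Reference model for FMULX/vmulx: like a*b, but zero times infinity
   is defined as +/-2.0 with the XOR of the two signs. */
static inline float ref_mulx(float a, float b) {
    if ((a == 0.0f && isinf(b)) || (isinf(a) && b == 0.0f))
        return (signbit(a) != signbit(b)) ? -2.0f : 2.0f;
    return a * b;
}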
\ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + 
float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_laneq_s32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ + int32x4_t __ret_270; \ + int32x4_t __s0_270 = __p0_270; \ + int8x16_t __s1_270 = __p1_270; \ + uint8x16_t __s2_270 = __p2_270; \ +uint8x16_t __reint_270 = __s2_270; \ + __ret_270 = vusdotq_s32(__s0_270, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_270, __p3_270)), __s1_270); \ + __ret_270; \ +}) +#else +#define vsudotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ + int32x4_t __ret_271; \ + int32x4_t __s0_271 = __p0_271; \ + int8x16_t __s1_271 = __p1_271; \ + uint8x16_t __s2_271 = __p2_271; \ + int32x4_t __rev0_271; __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \ + int8x16_t __rev1_271; __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_271 = __rev2_271; \ + __ret_271 = __noswap_vusdotq_s32(__rev0_271, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271)), __rev1_271); \ + __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \ + __ret_271; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ + int32x2_t __ret_272; \ + int32x2_t __s0_272 = __p0_272; \ + int8x8_t __s1_272 = __p1_272; \ + uint8x16_t __s2_272 = __p2_272; \ +uint8x16_t __reint_272 = __s2_272; \ + __ret_272 = vusdot_s32(__s0_272, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272)), __s1_272); \ + __ret_272; \ +}) +#else +#define vsudot_laneq_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ + int32x2_t __ret_273; \ + int32x2_t __s0_273 = __p0_273; \ + int8x8_t __s1_273 = __p1_273; \ + uint8x16_t __s2_273 = __p2_273; \ + int32x2_t __rev0_273; __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \ + int8x8_t __rev1_273; __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_273 = __rev2_273; \ + __ret_273 = __noswap_vusdot_s32(__rev0_273, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_273, __p3_273)), __rev1_273); \ + __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \ + __ret_273; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_laneq_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ + int32x4_t __ret_274; \ + int32x4_t __s0_274 = __p0_274; \ + uint8x16_t __s1_274 = __p1_274; \ + int8x16_t __s2_274 = 
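vsudotq_laneq_s32/vsudot_laneq_s32 above, and the vusdot* forms that follow, wrap the i8mm mixed-sign dot products: one operand is read as unsigned bytes and the other as signed, and laneq broadcasts one 32-bit group of four bytes from a 128-bit register; the su/us spelling records which operand is signed and which one carries the lane. A scalar sketch of the su form, with a hypothetical helper and only <stdint.h> assumed:

#include <stdint.h>

/* Reference model for vsudotq_laneq_s32: signed bytes times unsigned
   bytes, with group 'lane' of u broadcast to every accumulator lane. */
static inline void ref_sudot_laneq(int32_t acc[4], const int8_t s[16],
                                   const uint8_t u[16], int lane) {
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 4; ++j)
            acc[i] += (int32_t)s[4 * i + j] * (int32_t)u[4 * lane + j];
}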
__p2_274; \ +int8x16_t __reint_274 = __s2_274; \ + __ret_274 = vusdotq_s32(__s0_274, __s1_274, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_274, __p3_274))); \ + __ret_274; \ +}) +#else +#define vusdotq_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ + int32x4_t __ret_275; \ + int32x4_t __s0_275 = __p0_275; \ + uint8x16_t __s1_275 = __p1_275; \ + int8x16_t __s2_275 = __p2_275; \ + int32x4_t __rev0_275; __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \ + uint8x16_t __rev1_275; __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_275 = __rev2_275; \ + __ret_275 = __noswap_vusdotq_s32(__rev0_275, __rev1_275, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275))); \ + __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \ + __ret_275; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ + int32x2_t __ret_276; \ + int32x2_t __s0_276 = __p0_276; \ + uint8x8_t __s1_276 = __p1_276; \ + int8x16_t __s2_276 = __p2_276; \ +int8x16_t __reint_276 = __s2_276; \ + __ret_276 = vusdot_s32(__s0_276, __s1_276, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276))); \ + __ret_276; \ +}) +#else +#define vusdot_laneq_s32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ + int32x2_t __ret_277; \ + int32x2_t __s0_277 = __p0_277; \ + uint8x8_t __s1_277 = __p1_277; \ + int8x16_t __s2_277 = __p2_277; \ + int32x2_t __rev0_277; __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \ + uint8x8_t __rev1_277; __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_277 = __rev2_277; \ + __ret_277 = __noswap_vusdot_s32(__rev0_277, __rev1_277, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_277, __p3_277))); \ + __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \ + __ret_277; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41421,29 +43133,29 @@ __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } -__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64_t vabdd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); return __ret; } -__ai float32_t vabds_f32(float32_t __p0, 
float32_t __p1) { +__ai __attribute__((target("neon"))) float32_t vabds_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vabsq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); return __ret; } #else -__ai float64x2_t vabsq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); @@ -41453,13 +43165,13 @@ __ai float64x2_t vabsq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabsq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vabsq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); @@ -41468,29 +43180,29 @@ __ai int64x2_t vabsq_s64(int64x2_t __p0) { } #endif -__ai float64x1_t vabs_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vabs_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); return __ret; } -__ai int64x1_t vabs_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vabs_s64(int64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); return __ret; } -__ai int64_t vabsd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vabsd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 + __p1; return __ret; } #else -__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41500,34 +43212,34 @@ __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 + __p1; return __ret; } -__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); return __ret; } -__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vaddd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); return __ret; } -__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { +__ai __attribute__((target("neon"))) poly128_t vaddq_p128(poly128_t 
__p0, poly128_t __p1) { poly128_t __ret; __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); return __ret; } #else -__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41539,13 +43251,13 @@ __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); return __ret; } #else -__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41557,13 +43269,13 @@ __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -41575,13 +43287,13 @@ __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); return __ret; } #else -__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41593,13 +43305,13 @@ __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, 
int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); return __ret; } #else -__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41611,13 +43323,13 @@ __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); return __ret; } #else -__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -41629,13 +43341,13 @@ __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); return __ret; } #else -__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) { uint16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); @@ -41644,13 +43356,13 @@ __ai uint16_t vaddlvq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); return __ret; } #else -__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) { uint64_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); @@ -41659,13 +43371,13 @@ __ai uint64_t vaddlvq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); return __ret; } #else -__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) { uint32_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); @@ -41674,13 +43386,13 @@ __ai uint32_t vaddlvq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddlvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); return __ret; } #else -__ai int16_t vaddlvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) { 
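vaddlv* here (and the vaddv* forms below) are across-lane additions; the l variants widen before summing, so vaddlvq_u8 adds sixteen bytes into a uint16_t, which cannot wrap since 16 * 255 = 4080. A scalar sketch with a hypothetical helper name:

#include <stdint.h>

/* Reference model for vaddlvq_u8: widening horizontal add of 16 bytes. */
static inline uint16_t ref_vaddlvq_u8(const uint8_t v[16]) {
    uint16_t sum = 0;
    for (int i = 0; i < 16; ++i)
        sum = (uint16_t)(sum + v[i]);    /* at most 4080, no wraparound */
    return sum;
}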
int16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); @@ -41689,13 +43401,13 @@ __ai int16_t vaddlvq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddlvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); return __ret; } #else -__ai int64_t vaddlvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) { int64_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0); @@ -41704,13 +43416,13 @@ __ai int64_t vaddlvq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddlvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); return __ret; } #else -__ai int32_t vaddlvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) { int32_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); @@ -41719,13 +43431,13 @@ __ai int32_t vaddlvq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddlv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); return __ret; } #else -__ai uint16_t vaddlv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) { uint16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); @@ -41734,13 +43446,13 @@ __ai uint16_t vaddlv_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddlv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); return __ret; } #else -__ai uint64_t vaddlv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) { uint64_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); @@ -41749,13 +43461,13 @@ __ai uint64_t vaddlv_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddlv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); return __ret; } #else -__ai uint32_t vaddlv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) { uint32_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0); @@ -41764,13 +43476,13 @@ __ai uint32_t vaddlv_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddlv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); return __ret; } #else -__ai int16_t vaddlv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) { int16_t __ret; int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); @@ -41779,13 +43491,13 @@ __ai int16_t vaddlv_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddlv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0); return __ret; } #else -__ai int64_t vaddlv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) { int64_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); @@ -41794,13 +43506,13 @@ __ai int64_t vaddlv_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddlv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0); return __ret; } #else -__ai int32_t vaddlv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) { int32_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); @@ -41809,13 +43521,13 @@ __ai int32_t vaddlv_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vaddvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); return __ret; } #else -__ai uint8_t vaddvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) { uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); @@ -41824,13 +43536,13 @@ __ai uint8_t vaddvq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); return __ret; } #else -__ai uint32_t vaddvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) { uint32_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); @@ -41839,13 +43551,13 @@ __ai uint32_t vaddvq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddvq_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); return __ret; } #else -__ai uint64_t vaddvq_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) { uint64_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); @@ -41854,13 +43566,13 @@ __ai uint64_t vaddvq_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); return __ret; } #else -__ai uint16_t vaddvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) { uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret 
= (uint16_t) __builtin_neon_vaddvq_u16(__rev0); @@ -41869,13 +43581,13 @@ __ai uint16_t vaddvq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vaddvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0); return __ret; } #else -__ai int8_t vaddvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) { int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); @@ -41884,13 +43596,13 @@ __ai int8_t vaddvq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vaddvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); return __ret; } #else -__ai float64_t vaddvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); @@ -41899,13 +43611,13 @@ __ai float64_t vaddvq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vaddvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); return __ret; } #else -__ai float32_t vaddvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); @@ -41914,13 +43626,13 @@ __ai float32_t vaddvq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); return __ret; } #else -__ai int32_t vaddvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) { int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0); @@ -41929,13 +43641,13 @@ __ai int32_t vaddvq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddvq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); return __ret; } #else -__ai int64_t vaddvq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) { int64_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); @@ -41944,13 +43656,13 @@ __ai int64_t vaddvq_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); return __ret; } #else -__ai int16_t vaddvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) { int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); @@ 
-41959,13 +43671,13 @@ __ai int16_t vaddvq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vaddv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); return __ret; } #else -__ai uint8_t vaddv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) { uint8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); @@ -41974,13 +43686,13 @@ __ai uint8_t vaddv_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); return __ret; } #else -__ai uint32_t vaddv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) { uint32_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); @@ -41989,13 +43701,13 @@ __ai uint32_t vaddv_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); return __ret; } #else -__ai uint16_t vaddv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) { uint16_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); @@ -42004,13 +43716,13 @@ __ai uint16_t vaddv_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vaddv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); return __ret; } #else -__ai int8_t vaddv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) { int8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); @@ -42019,13 +43731,13 @@ __ai int8_t vaddv_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vaddv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); return __ret; } #else -__ai float32_t vaddv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); @@ -42034,13 +43746,13 @@ __ai float32_t vaddv_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); return __ret; } #else -__ai int32_t vaddv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) { int32_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); @@ -42049,13 +43761,13 @@ __ai int32_t vaddv_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddv_s16(int16x4_t __p0) { +__ai 
__attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vaddv_s16(__p0); return __ret; } #else -__ai int16_t vaddv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) { int16_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); @@ -42063,19 +43775,19 @@ __ai int16_t vaddv_s16(int16x4_t __p0) { } #endif -__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { +__ai __attribute__((target("neon"))) poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { poly64x1_t __ret; __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { +__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { poly64x2_t __ret; __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); return __ret; } #else -__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { +__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { poly64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42087,13 +43799,13 @@ __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42104,19 +43816,19 @@ __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) } #endif -__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42126,29 +43838,29 @@ __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai uint64x1_t 
vcage_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); return __ret; } -__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vcages_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42158,29 +43870,29 @@ __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); return __ret; } -__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42190,29 +43902,29 @@ __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); return __ret; } -__ai uint32_t vcales_f32(float32_t __p0, float32_t 
__p1) { +__ai __attribute__((target("neon"))) uint32_t vcales_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42222,34 +43934,34 @@ __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); return __ret; } -__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); return __ret; } -__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else -__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42260,13 +43972,13 @@ __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else -__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42277,13 +43989,13 @@ __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else -__ai uint64x2_t 
vceqq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42294,13 +44006,13 @@ __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 == __p1); return __ret; } #else -__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42310,49 +44022,49 @@ __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { } #endif -__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } -__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } -__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 == __p1); return __ret; } -__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); return __ret; } -__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); return __ret; } -__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); return __ret; } -__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) { uint8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); @@ -42361,19 +44073,19 @@ __ai uint8x8_t vceqz_p8(poly8x8_t __p0) { } #endif -__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vceqz_p64(poly64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai 
uint8x16_t vceqzq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) { uint8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); @@ -42383,13 +44095,13 @@ __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) { uint64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); @@ -42399,13 +44111,13 @@ __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); @@ -42415,13 +44127,13 @@ __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); @@ -42431,13 +44143,13 @@ __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); @@ -42447,13 +44159,13 @@ __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) { 
uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); @@ -42463,13 +44175,13 @@ __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); @@ -42479,13 +44191,13 @@ __ai uint8x16_t vceqzq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); @@ -42495,13 +44207,13 @@ __ai uint64x2_t vceqzq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); @@ -42511,13 +44223,13 @@ __ai uint32x4_t vceqzq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); @@ -42527,13 +44239,13 @@ __ai uint32x4_t vceqzq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); @@ -42543,13 +44255,13 @@ __ai uint64x2_t vceqzq_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t 
__p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); @@ -42559,13 +44271,13 @@ __ai uint16x8_t vceqzq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); @@ -42575,13 +44287,13 @@ __ai uint8x8_t vceqz_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); @@ -42590,19 +44302,19 @@ __ai uint32x2_t vceqz_u32(uint32x2_t __p0) { } #endif -__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vceqz_u64(uint64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); @@ -42612,13 +44324,13 @@ __ai uint16x4_t vceqz_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vceqz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); @@ -42627,19 +44339,19 @@ __ai uint8x8_t vceqz_s8(int8x8_t __p0) { } #endif -__ai uint64x1_t vceqz_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vceqz_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) { uint32x2_t 
__ret; __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vceqz_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); @@ -42649,13 +44361,13 @@ __ai uint32x2_t vceqz_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vceqz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); @@ -42664,19 +44376,19 @@ __ai uint32x2_t vceqz_s32(int32x2_t __p0) { } #endif -__ai uint64x1_t vceqz_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vceqz_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vceqz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); @@ -42685,34 +44397,34 @@ __ai uint16x4_t vceqz_s16(int16x4_t __p0) { } #endif -__ai uint64_t vceqzd_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vceqzd_u64(uint64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); return __ret; } -__ai uint64_t vceqzd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vceqzd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); return __ret; } -__ai uint64_t vceqzd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vceqzd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); return __ret; } -__ai uint32_t vceqzs_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vceqzs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 >= __p1); return __ret; } #else -__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42723,13 +44435,13 @@ __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, 
float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 >= __p1); return __ret; } #else -__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42740,13 +44452,13 @@ __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 >= __p1); return __ret; } #else -__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -42756,49 +44468,49 @@ __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { } #endif -__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 >= __p1); return __ret; } -__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 >= __p1); return __ret; } -__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 >= __p1); return __ret; } -__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcged_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); return __ret; } -__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); return __ret; } -__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcged_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); return __ret; } -__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vcges_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); @@ -42808,13 +44520,13 @@ __ai uint8x16_t vcgezq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { +__ai 
__attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); @@ -42824,13 +44536,13 @@ __ai uint64x2_t vcgezq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); @@ -42840,13 +44552,13 @@ __ai uint32x4_t vcgezq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); @@ -42856,13 +44568,13 @@ __ai uint32x4_t vcgezq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); @@ -42872,13 +44584,13 @@ __ai uint64x2_t vcgezq_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); @@ -42888,13 +44600,13 @@ __ai uint16x8_t vcgezq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgez_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vcgez_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) 
__builtin_neon_vcgez_v((int8x8_t)__rev0, 16); @@ -42903,19 +44615,19 @@ __ai uint8x8_t vcgez_s8(int8x8_t __p0) { } #endif -__ai uint64x1_t vcgez_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcgez_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgez_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcgez_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); @@ -42925,13 +44637,13 @@ __ai uint32x2_t vcgez_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgez_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcgez_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); @@ -42940,19 +44652,19 @@ __ai uint32x2_t vcgez_s32(int32x2_t __p0) { } #endif -__ai uint64x1_t vcgez_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcgez_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgez_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vcgez_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); @@ -42961,29 +44673,29 @@ __ai uint16x4_t vcgez_s16(int16x4_t __p0) { } #endif -__ai uint64_t vcgezd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcgezd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); return __ret; } -__ai uint64_t vcgezd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcgezd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); return __ret; } -__ai uint32_t vcgezs_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcgezs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else -__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); @@ -42994,13 +44706,13 @@ __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else -__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43011,13 +44723,13 @@ __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else -__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43027,49 +44739,49 @@ __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { } #endif -__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 > __p1); return __ret; } -__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 > __p1); return __ret; } -__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 > __p1); return __ret; } -__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); return __ret; } -__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); return __ret; } -__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); return __ret; } -__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); @@ -43079,13 +44791,13 @@ __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); @@ -43095,13 +44807,13 @@ __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); @@ -43111,13 +44823,13 @@ __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); @@ -43127,13 +44839,13 @@ __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); @@ -43143,13 +44855,13 @@ __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); @@ -43159,13 +44871,13 @@ __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 
16); return __ret; } #else -__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); @@ -43174,19 +44886,19 @@ __ai uint8x8_t vcgtz_s8(int8x8_t __p0) { } #endif -__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcgtz_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); @@ -43196,13 +44908,13 @@ __ai uint32x2_t vcgtz_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); @@ -43211,19 +44923,19 @@ __ai uint32x2_t vcgtz_s32(int32x2_t __p0) { } #endif -__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcgtz_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); @@ -43232,29 +44944,29 @@ __ai uint16x4_t vcgtz_s16(int16x4_t __p0) { } #endif -__ai uint64_t vcgtzd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcgtzd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0); return __ret; } -__ai uint64_t vcgtzd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcgtzd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); return __ret; } -__ai uint32_t vcgtzs_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcgtzs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else -__ai 
uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43265,13 +44977,13 @@ __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43282,13 +44994,13 @@ __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43298,49 +45010,49 @@ __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { } #endif -__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } -__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } -__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } -__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); return __ret; } -__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcled_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1); return __ret; } -__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcled_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); return __ret; } -__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vcles_f32(float32_t __p0, float32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vclezq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = 
(uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vclezq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); @@ -43350,13 +45062,13 @@ __ai uint8x16_t vclezq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vclezq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vclezq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); @@ -43366,13 +45078,13 @@ __ai uint64x2_t vclezq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclezq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vclezq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); @@ -43382,13 +45094,13 @@ __ai uint32x4_t vclezq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclezq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vclezq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); @@ -43398,13 +45110,13 @@ __ai uint32x4_t vclezq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vclezq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vclezq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); @@ -43414,13 +45126,13 @@ __ai uint64x2_t vclezq_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclezq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vclezq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); @@ -43430,13 
+45142,13 @@ __ai uint16x8_t vclezq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclez_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vclez_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); @@ -43445,19 +45157,19 @@ __ai uint8x8_t vclez_s8(int8x8_t __p0) { } #endif -__ai uint64x1_t vclez_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vclez_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclez_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vclez_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); @@ -43467,13 +45179,13 @@ __ai uint32x2_t vclez_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclez_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vclez_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); @@ -43482,19 +45194,19 @@ __ai uint32x2_t vclez_s32(int32x2_t __p0) { } #endif -__ai uint64x1_t vclez_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vclez_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclez_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vclez_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); @@ -43503,29 +45215,29 @@ __ai uint16x4_t vclez_s16(int16x4_t __p0) { } #endif -__ai uint64_t vclezd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vclezd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0); return __ret; } -__ai uint64_t vclezd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vclezd_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); return __ret; } -__ai uint32_t vclezs_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vclezs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); 
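/* The same pattern repeats for every AArch64 comparison intrinsic in these
 * hunks: the body is unchanged and the only edit is the new function-level
 * target attribute. A hedged reading, based on clang's documented `target`
 * attribute semantics rather than anything stated in this diff:
 * __attribute__((target("neon"))) enables the NEON feature for that single
 * always_inline helper, so the header no longer depends on the whole
 * translation unit being built with NEON enabled; callers still need NEON
 * available themselves for the inlining to succeed. A minimal sketch of such
 * a guarded helper, assuming AArch64 clang (my_vclezs_f32 is a hypothetical
 * name, not from this diff):
 *
 *   static __inline__ __attribute__((always_inline, target("neon")))
 *   uint32_t my_vclezs_f32(float32_t x) {
 *     // compare-less-than-or-equal-zero on a scalar float32 lane
 *     return (uint32_t) __builtin_neon_vclezs_f32(x);
 *   }
 */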
return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else -__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43536,13 +45248,13 @@ __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else -__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43553,13 +45265,13 @@ __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else -__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -43569,49 +45281,49 @@ __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { } #endif -__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 < __p1); return __ret; } -__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 < __p1); return __ret; } -__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0 < __p1); return __ret; } -__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); return __ret; } -__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1); return __ret; } -__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); return __ret; } -__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vclts_f32(float32_t __p0, float32_t 
__p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) { uint8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); @@ -43621,13 +45333,13 @@ __ai uint8x16_t vcltzq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); @@ -43637,13 +45349,13 @@ __ai uint64x2_t vcltzq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); @@ -43653,13 +45365,13 @@ __ai uint32x4_t vcltzq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) { uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); @@ -43669,13 +45381,13 @@ __ai uint32x4_t vcltzq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); @@ -43685,13 +45397,13 @@ __ai uint64x2_t vcltzq_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { 
+__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) { uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); @@ -43701,13 +45413,13 @@ __ai uint16x8_t vcltzq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcltz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vcltz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) { uint8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); @@ -43716,19 +45428,19 @@ __ai uint8x8_t vcltz_s8(int8x8_t __p0) { } #endif -__ai uint64x1_t vcltz_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcltz_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcltz_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcltz_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); @@ -43738,13 +45450,13 @@ __ai uint32x2_t vcltz_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcltz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcltz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) { uint32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); @@ -43753,19 +45465,19 @@ __ai uint32x2_t vcltz_s32(int32x2_t __p0) { } #endif -__ai uint64x1_t vcltz_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcltz_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcltz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vcltz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) { uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); @@ -43774,29 +45486,29 @@ __ai uint16x4_t vcltz_s16(int16x4_t __p0) { } #endif -__ai uint64_t vcltzd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcltzd_s64(int64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0); return __ret; } -__ai uint64_t vcltzd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcltzd_f64(float64_t __p0) { uint64_t 
__ret; __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); return __ret; } -__ai uint32_t vcltzs_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcltzs_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else -__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -43805,13 +45517,13 @@ __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else -__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -43820,892 +45532,892 @@ __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p8(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ - poly8x16_t __ret_257; \ - poly8x16_t __s0_257 = __p0_257; \ - poly8x8_t __s2_257 = __p2_257; \ - __ret_257 = vsetq_lane_p8(vget_lane_p8(__s2_257, __p3_257), __s0_257, __p1_257); \ - __ret_257; \ -}) -#else -#define vcopyq_lane_p8(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ - poly8x16_t __ret_258; \ - poly8x16_t __s0_258 = __p0_258; \ - poly8x8_t __s2_258 = __p2_258; \ - poly8x16_t __rev0_258; __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_258 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_258, __p3_258), __rev0_258, __p1_258); \ - __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_258; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ - poly16x8_t __ret_259; \ - poly16x8_t __s0_259 = __p0_259; \ - poly16x4_t __s2_259 = __p2_259; \ - __ret_259 = vsetq_lane_p16(vget_lane_p16(__s2_259, __p3_259), __s0_259, __p1_259); \ - __ret_259; \ -}) -#else -#define vcopyq_lane_p16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ - poly16x8_t __ret_260; \ - poly16x8_t __s0_260 = __p0_260; \ - poly16x4_t __s2_260 = __p2_260; \ - poly16x8_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x4_t __rev2_260; __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \ - __ret_260 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_260, __p3_260), __rev0_260, __p1_260); \ - __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_260; \ -}) -#endif 
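/* The remainder of this hunk deletes the vcopyq_lane_* / vcopy_lane_* macros
 * and re-emits them with the generated temporary suffixes shifted
 * (__ret_257 becomes __ret_278, and so on, a +21 renumbering from the NEON
 * emitter) plus no other change to the statement-expression bodies. A hedged
 * sketch of what the little-endian variant expands to, assuming AArch64
 * <arm_neon.h> (illustrative values, not from this diff):
 *
 *   int32x4_t dst = vdupq_n_s32(0);
 *   int32x2_t src = vdup_n_s32(7);
 *   // vcopyq_lane_s32(dst, 1, src, 0): read lane 0 of src, write lane 1 of dst
 *   int32x4_t out = vsetq_lane_s32(vget_lane_s32(src, 0), dst, 1);
 *
 * The big-endian variants perform the same insert but first reverse each
 * vector's lanes with __builtin_shufflevector, call the __noswap_ get/set
 * helpers, and reverse the result back, so lane indices keep their
 * little-endian meaning regardless of byte order.
 */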
- -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ - uint8x16_t __ret_261; \ - uint8x16_t __s0_261 = __p0_261; \ - uint8x8_t __s2_261 = __p2_261; \ - __ret_261 = vsetq_lane_u8(vget_lane_u8(__s2_261, __p3_261), __s0_261, __p1_261); \ - __ret_261; \ -}) -#else -#define vcopyq_lane_u8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ - uint8x16_t __ret_262; \ - uint8x16_t __s0_262 = __p0_262; \ - uint8x8_t __s2_262 = __p2_262; \ - uint8x16_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_262 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_262, __p3_262), __rev0_262, __p1_262); \ - __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_262; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ - uint32x4_t __ret_263; \ - uint32x4_t __s0_263 = __p0_263; \ - uint32x2_t __s2_263 = __p2_263; \ - __ret_263 = vsetq_lane_u32(vget_lane_u32(__s2_263, __p3_263), __s0_263, __p1_263); \ - __ret_263; \ -}) -#else -#define vcopyq_lane_u32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ - uint32x4_t __ret_264; \ - uint32x4_t __s0_264 = __p0_264; \ - uint32x2_t __s2_264 = __p2_264; \ - uint32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \ - uint32x2_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \ - __ret_264 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_264, __p3_264), __rev0_264, __p1_264); \ - __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \ - __ret_264; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u64(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ - uint64x2_t __ret_265; \ - uint64x2_t __s0_265 = __p0_265; \ - uint64x1_t __s2_265 = __p2_265; \ - __ret_265 = vsetq_lane_u64(vget_lane_u64(__s2_265, __p3_265), __s0_265, __p1_265); \ - __ret_265; \ -}) -#else -#define vcopyq_lane_u64(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ - uint64x2_t __ret_266; \ - uint64x2_t __s0_266 = __p0_266; \ - uint64x1_t __s2_266 = __p2_266; \ - uint64x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \ - __ret_266 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_266, __p3_266), __rev0_266, __p1_266); \ - __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \ - __ret_266; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ - uint16x8_t __ret_267; \ - uint16x8_t __s0_267 = __p0_267; \ - uint16x4_t __s2_267 = __p2_267; \ - __ret_267 = vsetq_lane_u16(vget_lane_u16(__s2_267, __p3_267), __s0_267, __p1_267); \ - __ret_267; \ -}) -#else -#define vcopyq_lane_u16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ - uint16x8_t __ret_268; \ - uint16x8_t __s0_268 = __p0_268; \ - uint16x4_t __s2_268 = __p2_268; \ - uint16x8_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 3, 2, 1, 0); \ - __ret_268 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_268, __p3_268), __rev0_268, __p1_268); \ - __ret_268 = 
__builtin_shufflevector(__ret_268, __ret_268, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_268; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s8(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ - int8x16_t __ret_269; \ - int8x16_t __s0_269 = __p0_269; \ - int8x8_t __s2_269 = __p2_269; \ - __ret_269 = vsetq_lane_s8(vget_lane_s8(__s2_269, __p3_269), __s0_269, __p1_269); \ - __ret_269; \ -}) -#else -#define vcopyq_lane_s8(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ - int8x16_t __ret_270; \ - int8x16_t __s0_270 = __p0_270; \ - int8x8_t __s2_270 = __p2_270; \ - int8x16_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_270 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_270, __p3_270), __rev0_270, __p1_270); \ - __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_270; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ - float32x4_t __ret_271; \ - float32x4_t __s0_271 = __p0_271; \ - float32x2_t __s2_271 = __p2_271; \ - __ret_271 = vsetq_lane_f32(vget_lane_f32(__s2_271, __p3_271), __s0_271, __p1_271); \ - __ret_271; \ -}) -#else -#define vcopyq_lane_f32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ - float32x4_t __ret_272; \ - float32x4_t __s0_272 = __p0_272; \ - float32x2_t __s2_272 = __p2_272; \ - float32x4_t __rev0_272; __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \ - float32x2_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 1, 0); \ - __ret_272 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_272, __p3_272), __rev0_272, __p1_272); \ - __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \ - __ret_272; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ - int32x4_t __ret_273; \ - int32x4_t __s0_273 = __p0_273; \ - int32x2_t __s2_273 = __p2_273; \ - __ret_273 = vsetq_lane_s32(vget_lane_s32(__s2_273, __p3_273), __s0_273, __p1_273); \ - __ret_273; \ -}) -#else -#define vcopyq_lane_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ - int32x4_t __ret_274; \ - int32x4_t __s0_274 = __p0_274; \ - int32x2_t __s2_274 = __p2_274; \ - int32x4_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \ - int32x2_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 1, 0); \ - __ret_274 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_274, __p3_274), __rev0_274, __p1_274); \ - __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \ - __ret_274; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s64(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ - int64x2_t __ret_275; \ - int64x2_t __s0_275 = __p0_275; \ - int64x1_t __s2_275 = __p2_275; \ - __ret_275 = vsetq_lane_s64(vget_lane_s64(__s2_275, __p3_275), __s0_275, __p1_275); \ - __ret_275; \ -}) -#else -#define vcopyq_lane_s64(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ - int64x2_t __ret_276; \ - int64x2_t __s0_276 = __p0_276; \ - int64x1_t __s2_276 = __p2_276; \ - int64x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \ - __ret_276 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_276, 
__p3_276), __rev0_276, __p1_276); \ - __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \ - __ret_276; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ - int16x8_t __ret_277; \ - int16x8_t __s0_277 = __p0_277; \ - int16x4_t __s2_277 = __p2_277; \ - __ret_277 = vsetq_lane_s16(vget_lane_s16(__s2_277, __p3_277), __s0_277, __p1_277); \ - __ret_277; \ -}) -#else -#define vcopyq_lane_s16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ - int16x8_t __ret_278; \ - int16x8_t __s0_278 = __p0_278; \ - int16x4_t __s2_278 = __p2_278; \ - int16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_278; __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 3, 2, 1, 0); \ - __ret_278 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_278, __p3_278), __rev0_278, __p1_278); \ - __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcopyq_lane_p8(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ + poly8x16_t __ret_278; \ + poly8x16_t __s0_278 = __p0_278; \ + poly8x8_t __s2_278 = __p2_278; \ + __ret_278 = vsetq_lane_p8(vget_lane_p8(__s2_278, __p3_278), __s0_278, __p1_278); \ __ret_278; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ - poly8x8_t __ret_279; \ - poly8x8_t __s0_279 = __p0_279; \ +#else +#define vcopyq_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ + poly8x16_t __ret_279; \ + poly8x16_t __s0_279 = __p0_279; \ poly8x8_t __s2_279 = __p2_279; \ - __ret_279 = vset_lane_p8(vget_lane_p8(__s2_279, __p3_279), __s0_279, __p1_279); \ + poly8x16_t __rev0_279; __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_279 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_279, __p3_279), __rev0_279, __p1_279); \ + __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_279; \ }) -#else -#define vcopy_lane_p8(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ - poly8x8_t __ret_280; \ - poly8x8_t __s0_280 = __p0_280; \ - poly8x8_t __s2_280 = __p2_280; \ - poly8x8_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_280; __rev2_280 = __builtin_shufflevector(__s2_280, __s2_280, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_280 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_280, __p3_280), __rev0_280, __p1_280); \ - __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ + poly16x8_t __ret_280; \ + poly16x8_t __s0_280 = __p0_280; \ + poly16x4_t __s2_280 = __p2_280; \ + __ret_280 = vsetq_lane_p16(vget_lane_p16(__s2_280, __p3_280), __s0_280, __p1_280); \ __ret_280; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ - poly16x4_t __ret_281; \ - poly16x4_t __s0_281 = __p0_281; \ +#else +#define vcopyq_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + poly16x8_t __ret_281; \ + poly16x8_t __s0_281 = __p0_281; \ poly16x4_t __s2_281 = __p2_281; \ - __ret_281 = 
vset_lane_p16(vget_lane_p16(__s2_281, __p3_281), __s0_281, __p1_281); \ + poly16x8_t __rev0_281; __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \ + __ret_281 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_281, __p3_281), __rev0_281, __p1_281); \ + __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_281; \ }) -#else -#define vcopy_lane_p16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ - poly16x4_t __ret_282; \ - poly16x4_t __s0_282 = __p0_282; \ - poly16x4_t __s2_282 = __p2_282; \ - poly16x4_t __rev0_282; __rev0_282 = __builtin_shufflevector(__s0_282, __s0_282, 3, 2, 1, 0); \ - poly16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ - __ret_282 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_282, __p3_282), __rev0_282, __p1_282); \ - __ret_282 = __builtin_shufflevector(__ret_282, __ret_282, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ + uint8x16_t __ret_282; \ + uint8x16_t __s0_282 = __p0_282; \ + uint8x8_t __s2_282 = __p2_282; \ + __ret_282 = vsetq_lane_u8(vget_lane_u8(__s2_282, __p3_282), __s0_282, __p1_282); \ __ret_282; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ - uint8x8_t __ret_283; \ - uint8x8_t __s0_283 = __p0_283; \ +#else +#define vcopyq_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ + uint8x16_t __ret_283; \ + uint8x16_t __s0_283 = __p0_283; \ uint8x8_t __s2_283 = __p2_283; \ - __ret_283 = vset_lane_u8(vget_lane_u8(__s2_283, __p3_283), __s0_283, __p1_283); \ + uint8x16_t __rev0_283; __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_283; __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_283 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_283, __p3_283), __rev0_283, __p1_283); \ + __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_283; \ }) -#else -#define vcopy_lane_u8(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ - uint8x8_t __ret_284; \ - uint8x8_t __s0_284 = __p0_284; \ - uint8x8_t __s2_284 = __p2_284; \ - uint8x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_284 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_284, __p3_284), __rev0_284, __p1_284); \ - __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u32(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ + uint32x4_t __ret_284; \ + uint32x4_t __s0_284 = __p0_284; \ + uint32x2_t __s2_284 = __p2_284; \ + __ret_284 = vsetq_lane_u32(vget_lane_u32(__s2_284, __p3_284), __s0_284, __p1_284); \ __ret_284; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ - uint32x2_t __ret_285; \ - uint32x2_t __s0_285 = __p0_285; \ +#else +#define vcopyq_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ + uint32x4_t __ret_285; \ + uint32x4_t __s0_285 = __p0_285; \ 
uint32x2_t __s2_285 = __p2_285; \ - __ret_285 = vset_lane_u32(vget_lane_u32(__s2_285, __p3_285), __s0_285, __p1_285); \ + uint32x4_t __rev0_285; __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 3, 2, 1, 0); \ + uint32x2_t __rev2_285; __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 1, 0); \ + __ret_285 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_285, __p3_285), __rev0_285, __p1_285); \ + __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 3, 2, 1, 0); \ __ret_285; \ }) -#else -#define vcopy_lane_u32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ - uint32x2_t __ret_286; \ - uint32x2_t __s0_286 = __p0_286; \ - uint32x2_t __s2_286 = __p2_286; \ - uint32x2_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 1, 0); \ - uint32x2_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 1, 0); \ - __ret_286 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_286, __p3_286), __rev0_286, __p1_286); \ - __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ + uint64x2_t __ret_286; \ + uint64x2_t __s0_286 = __p0_286; \ + uint64x1_t __s2_286 = __p2_286; \ + __ret_286 = vsetq_lane_u64(vget_lane_u64(__s2_286, __p3_286), __s0_286, __p1_286); \ __ret_286; \ }) +#else +#define vcopyq_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ + uint64x2_t __ret_287; \ + uint64x2_t __s0_287 = __p0_287; \ + uint64x1_t __s2_287 = __p2_287; \ + uint64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \ + __ret_287 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_287, __p3_287), __rev0_287, __p1_287); \ + __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \ + __ret_287; \ +}) #endif -#define vcopy_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ - uint64x1_t __ret_287; \ - uint64x1_t __s0_287 = __p0_287; \ - uint64x1_t __s2_287 = __p2_287; \ - __ret_287 = vset_lane_u64(vget_lane_u64(__s2_287, __p3_287), __s0_287, __p1_287); \ - __ret_287; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ - uint16x4_t __ret_288; \ - uint16x4_t __s0_288 = __p0_288; \ +#define vcopyq_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ + uint16x8_t __ret_288; \ + uint16x8_t __s0_288 = __p0_288; \ uint16x4_t __s2_288 = __p2_288; \ - __ret_288 = vset_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ + __ret_288 = vsetq_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ __ret_288; \ }) #else -#define vcopy_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ - uint16x4_t __ret_289; \ - uint16x4_t __s0_289 = __p0_289; \ +#define vcopyq_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ + uint16x8_t __ret_289; \ + uint16x8_t __s0_289 = __p0_289; \ uint16x4_t __s2_289 = __p2_289; \ - uint16x4_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 3, 2, 1, 0); \ + uint16x8_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x4_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 3, 2, 1, 0); \ - __ret_289 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ - __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 3, 2, 1, 0); \ + __ret_289 = 
__noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ + __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_289; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ - int8x8_t __ret_290; \ - int8x8_t __s0_290 = __p0_290; \ +#define vcopyq_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ + int8x16_t __ret_290; \ + int8x16_t __s0_290 = __p0_290; \ int8x8_t __s2_290 = __p2_290; \ - __ret_290 = vset_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ + __ret_290 = vsetq_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ __ret_290; \ }) #else -#define vcopy_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ - int8x8_t __ret_291; \ - int8x8_t __s0_291 = __p0_291; \ +#define vcopyq_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ + int8x16_t __ret_291; \ + int8x16_t __s0_291 = __p0_291; \ int8x8_t __s2_291 = __p2_291; \ - int8x8_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_291 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ - __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ + __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_291; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ - float32x2_t __ret_292; \ - float32x2_t __s0_292 = __p0_292; \ +#define vcopyq_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ + float32x4_t __ret_292; \ + float32x4_t __s0_292 = __p0_292; \ float32x2_t __s2_292 = __p2_292; \ - __ret_292 = vset_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ + __ret_292 = vsetq_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ __ret_292; \ }) #else -#define vcopy_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ - float32x2_t __ret_293; \ - float32x2_t __s0_293 = __p0_293; \ +#define vcopyq_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ + float32x4_t __ret_293; \ + float32x4_t __s0_293 = __p0_293; \ float32x2_t __s2_293 = __p2_293; \ - float32x2_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 1, 0); \ + float32x4_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 3, 2, 1, 0); \ float32x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ - __ret_293 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ - __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 1, 0); \ + __ret_293 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ + __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 3, 2, 1, 0); \ __ret_293; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ - int32x2_t __ret_294; \ - int32x2_t __s0_294 = __p0_294; \ 
+#define vcopyq_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ + int32x4_t __ret_294; \ + int32x4_t __s0_294 = __p0_294; \ int32x2_t __s2_294 = __p2_294; \ - __ret_294 = vset_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ + __ret_294 = vsetq_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ __ret_294; \ }) #else -#define vcopy_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ - int32x2_t __ret_295; \ - int32x2_t __s0_295 = __p0_295; \ +#define vcopyq_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ + int32x4_t __ret_295; \ + int32x4_t __s0_295 = __p0_295; \ int32x2_t __s2_295 = __p2_295; \ - int32x2_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 1, 0); \ + int32x4_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 3, 2, 1, 0); \ int32x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \ - __ret_295 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ - __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 1, 0); \ + __ret_295 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ + __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 3, 2, 1, 0); \ __ret_295; \ }) #endif -#define vcopy_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ - int64x1_t __ret_296; \ - int64x1_t __s0_296 = __p0_296; \ +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ + int64x2_t __ret_296; \ + int64x2_t __s0_296 = __p0_296; \ int64x1_t __s2_296 = __p2_296; \ - __ret_296 = vset_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ + __ret_296 = vsetq_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ __ret_296; \ }) -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \ - int16x4_t __ret_297; \ - int16x4_t __s0_297 = __p0_297; \ - int16x4_t __s2_297 = __p2_297; \ - __ret_297 = vset_lane_s16(vget_lane_s16(__s2_297, __p3_297), __s0_297, __p1_297); \ +#else +#define vcopyq_lane_s64(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \ + int64x2_t __ret_297; \ + int64x2_t __s0_297 = __p0_297; \ + int64x1_t __s2_297 = __p2_297; \ + int64x2_t __rev0_297; __rev0_297 = __builtin_shufflevector(__s0_297, __s0_297, 1, 0); \ + __ret_297 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_297, __p3_297), __rev0_297, __p1_297); \ + __ret_297 = __builtin_shufflevector(__ret_297, __ret_297, 1, 0); \ __ret_297; \ }) -#else -#define vcopy_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ - int16x4_t __ret_298; \ - int16x4_t __s0_298 = __p0_298; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ + int16x8_t __ret_298; \ + int16x8_t __s0_298 = __p0_298; \ int16x4_t __s2_298 = __p2_298; \ - int16x4_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 3, 2, 1, 0); \ - int16x4_t __rev2_298; __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \ - __ret_298 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_298, __p3_298), __rev0_298, __p1_298); \ - __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \ + __ret_298 = vsetq_lane_s16(vget_lane_s16(__s2_298, __p3_298), __s0_298, __p1_298); \ __ret_298; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p8(__p0_299, 
__p1_299, __p2_299, __p3_299) __extension__ ({ \ - poly8x16_t __ret_299; \ - poly8x16_t __s0_299 = __p0_299; \ - poly8x16_t __s2_299 = __p2_299; \ - __ret_299 = vsetq_lane_p8(vgetq_lane_p8(__s2_299, __p3_299), __s0_299, __p1_299); \ +#else +#define vcopyq_lane_s16(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \ + int16x8_t __ret_299; \ + int16x8_t __s0_299 = __p0_299; \ + int16x4_t __s2_299 = __p2_299; \ + int16x8_t __rev0_299; __rev0_299 = __builtin_shufflevector(__s0_299, __s0_299, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_299; __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \ + __ret_299 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_299, __p3_299), __rev0_299, __p1_299); \ + __ret_299 = __builtin_shufflevector(__ret_299, __ret_299, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_299; \ }) -#else -#define vcopyq_laneq_p8(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \ - poly8x16_t __ret_300; \ - poly8x16_t __s0_300 = __p0_300; \ - poly8x16_t __s2_300 = __p2_300; \ - poly8x16_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_300; __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_300 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_300, __p3_300), __rev0_300, __p1_300); \ - __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p8(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \ + poly8x8_t __ret_300; \ + poly8x8_t __s0_300 = __p0_300; \ + poly8x8_t __s2_300 = __p2_300; \ + __ret_300 = vset_lane_p8(vget_lane_p8(__s2_300, __p3_300), __s0_300, __p1_300); \ __ret_300; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ - poly16x8_t __ret_301; \ - poly16x8_t __s0_301 = __p0_301; \ - poly16x8_t __s2_301 = __p2_301; \ - __ret_301 = vsetq_lane_p16(vgetq_lane_p16(__s2_301, __p3_301), __s0_301, __p1_301); \ +#else +#define vcopy_lane_p8(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ + poly8x8_t __ret_301; \ + poly8x8_t __s0_301 = __p0_301; \ + poly8x8_t __s2_301 = __p2_301; \ + poly8x8_t __rev0_301; __rev0_301 = __builtin_shufflevector(__s0_301, __s0_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_301; __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_301 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_301, __p3_301), __rev0_301, __p1_301); \ + __ret_301 = __builtin_shufflevector(__ret_301, __ret_301, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_301; \ }) -#else -#define vcopyq_laneq_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ - poly16x8_t __ret_302; \ - poly16x8_t __s0_302 = __p0_302; \ - poly16x8_t __s2_302 = __p2_302; \ - poly16x8_t __rev0_302; __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev2_302; __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_302 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_302, __p3_302), __rev0_302, __p1_302); \ - __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ + poly16x4_t __ret_302; \ + poly16x4_t __s0_302 = __p0_302; \ + poly16x4_t 
__s2_302 = __p2_302; \ + __ret_302 = vset_lane_p16(vget_lane_p16(__s2_302, __p3_302), __s0_302, __p1_302); \ __ret_302; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ - uint8x16_t __ret_303; \ - uint8x16_t __s0_303 = __p0_303; \ - uint8x16_t __s2_303 = __p2_303; \ - __ret_303 = vsetq_lane_u8(vgetq_lane_u8(__s2_303, __p3_303), __s0_303, __p1_303); \ +#else +#define vcopy_lane_p16(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ + poly16x4_t __ret_303; \ + poly16x4_t __s0_303 = __p0_303; \ + poly16x4_t __s2_303 = __p2_303; \ + poly16x4_t __rev0_303; __rev0_303 = __builtin_shufflevector(__s0_303, __s0_303, 3, 2, 1, 0); \ + poly16x4_t __rev2_303; __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 3, 2, 1, 0); \ + __ret_303 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_303, __p3_303), __rev0_303, __p1_303); \ + __ret_303 = __builtin_shufflevector(__ret_303, __ret_303, 3, 2, 1, 0); \ __ret_303; \ }) -#else -#define vcopyq_laneq_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ - uint8x16_t __ret_304; \ - uint8x16_t __s0_304 = __p0_304; \ - uint8x16_t __s2_304 = __p2_304; \ - uint8x16_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_304; __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_304 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_304, __p3_304), __rev0_304, __p1_304); \ - __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ + uint8x8_t __ret_304; \ + uint8x8_t __s0_304 = __p0_304; \ + uint8x8_t __s2_304 = __p2_304; \ + __ret_304 = vset_lane_u8(vget_lane_u8(__s2_304, __p3_304), __s0_304, __p1_304); \ __ret_304; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ - uint32x4_t __ret_305; \ - uint32x4_t __s0_305 = __p0_305; \ - uint32x4_t __s2_305 = __p2_305; \ - __ret_305 = vsetq_lane_u32(vgetq_lane_u32(__s2_305, __p3_305), __s0_305, __p1_305); \ +#else +#define vcopy_lane_u8(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ + uint8x8_t __ret_305; \ + uint8x8_t __s0_305 = __p0_305; \ + uint8x8_t __s2_305 = __p2_305; \ + uint8x8_t __rev0_305; __rev0_305 = __builtin_shufflevector(__s0_305, __s0_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_305; __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_305 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_305, __p3_305), __rev0_305, __p1_305); \ + __ret_305 = __builtin_shufflevector(__ret_305, __ret_305, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_305; \ }) -#else -#define vcopyq_laneq_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ - uint32x4_t __ret_306; \ - uint32x4_t __s0_306 = __p0_306; \ - uint32x4_t __s2_306 = __p2_306; \ - uint32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ - uint32x4_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \ - __ret_306 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_306, __p3_306), __rev0_306, __p1_306); \ - __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vcopy_lane_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ + uint32x2_t __ret_306; \ + uint32x2_t __s0_306 = __p0_306; \ + uint32x2_t __s2_306 = __p2_306; \ + __ret_306 = vset_lane_u32(vget_lane_u32(__s2_306, __p3_306), __s0_306, __p1_306); \ __ret_306; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u64(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ - uint64x2_t __ret_307; \ - uint64x2_t __s0_307 = __p0_307; \ - uint64x2_t __s2_307 = __p2_307; \ - __ret_307 = vsetq_lane_u64(vgetq_lane_u64(__s2_307, __p3_307), __s0_307, __p1_307); \ +#else +#define vcopy_lane_u32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ + uint32x2_t __ret_307; \ + uint32x2_t __s0_307 = __p0_307; \ + uint32x2_t __s2_307 = __p2_307; \ + uint32x2_t __rev0_307; __rev0_307 = __builtin_shufflevector(__s0_307, __s0_307, 1, 0); \ + uint32x2_t __rev2_307; __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 1, 0); \ + __ret_307 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_307, __p3_307), __rev0_307, __p1_307); \ + __ret_307 = __builtin_shufflevector(__ret_307, __ret_307, 1, 0); \ __ret_307; \ }) -#else -#define vcopyq_laneq_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ - uint64x2_t __ret_308; \ - uint64x2_t __s0_308 = __p0_308; \ - uint64x2_t __s2_308 = __p2_308; \ - uint64x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ - uint64x2_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 1, 0); \ - __ret_308 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_308, __p3_308), __rev0_308, __p1_308); \ - __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ - __ret_308; \ -}) #endif +#define vcopy_lane_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ + uint64x1_t __ret_308; \ + uint64x1_t __s0_308 = __p0_308; \ + uint64x1_t __s2_308 = __p2_308; \ + __ret_308 = vset_lane_u64(vget_lane_u64(__s2_308, __p3_308), __s0_308, __p1_308); \ + __ret_308; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ - uint16x8_t __ret_309; \ - uint16x8_t __s0_309 = __p0_309; \ - uint16x8_t __s2_309 = __p2_309; \ - __ret_309 = vsetq_lane_u16(vgetq_lane_u16(__s2_309, __p3_309), __s0_309, __p1_309); \ +#define vcopy_lane_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ + uint16x4_t __ret_309; \ + uint16x4_t __s0_309 = __p0_309; \ + uint16x4_t __s2_309 = __p2_309; \ + __ret_309 = vset_lane_u16(vget_lane_u16(__s2_309, __p3_309), __s0_309, __p1_309); \ __ret_309; \ }) #else -#define vcopyq_laneq_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ - uint16x8_t __ret_310; \ - uint16x8_t __s0_310 = __p0_310; \ - uint16x8_t __s2_310 = __p2_310; \ - uint16x8_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_310 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ - __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcopy_lane_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ + uint16x4_t __ret_310; \ + uint16x4_t __s0_310 = __p0_310; \ + uint16x4_t __s2_310 = __p2_310; \ + uint16x4_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \ + uint16x4_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 3, 
2, 1, 0); \ + __ret_310 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ + __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \ __ret_310; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ - int8x16_t __ret_311; \ - int8x16_t __s0_311 = __p0_311; \ - int8x16_t __s2_311 = __p2_311; \ - __ret_311 = vsetq_lane_s8(vgetq_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ +#define vcopy_lane_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ + int8x8_t __ret_311; \ + int8x8_t __s0_311 = __p0_311; \ + int8x8_t __s2_311 = __p2_311; \ + __ret_311 = vset_lane_s8(vget_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ __ret_311; \ }) #else -#define vcopyq_laneq_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ - int8x16_t __ret_312; \ - int8x16_t __s0_312 = __p0_312; \ - int8x16_t __s2_312 = __p2_312; \ - int8x16_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_312 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ - __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcopy_lane_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ + int8x8_t __ret_312; \ + int8x8_t __s0_312 = __p0_312; \ + int8x8_t __s2_312 = __p2_312; \ + int8x8_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ + __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_312; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ - float32x4_t __ret_313; \ - float32x4_t __s0_313 = __p0_313; \ - float32x4_t __s2_313 = __p2_313; \ - __ret_313 = vsetq_lane_f32(vgetq_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ +#define vcopy_lane_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ + float32x2_t __ret_313; \ + float32x2_t __s0_313 = __p0_313; \ + float32x2_t __s2_313 = __p2_313; \ + __ret_313 = vset_lane_f32(vget_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ __ret_313; \ }) #else -#define vcopyq_laneq_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ - float32x4_t __ret_314; \ - float32x4_t __s0_314 = __p0_314; \ - float32x4_t __s2_314 = __p2_314; \ - float32x4_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 3, 2, 1, 0); \ - float32x4_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 3, 2, 1, 0); \ - __ret_314 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ - __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 3, 2, 1, 0); \ +#define vcopy_lane_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ + float32x2_t __ret_314; \ + float32x2_t __s0_314 = __p0_314; \ + float32x2_t __s2_314 = __p2_314; \ + float32x2_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \ + float32x2_t __rev2_314; __rev2_314 = 
__builtin_shufflevector(__s2_314, __s2_314, 1, 0); \ + __ret_314 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ + __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \ __ret_314; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ - int32x4_t __ret_315; \ - int32x4_t __s0_315 = __p0_315; \ - int32x4_t __s2_315 = __p2_315; \ - __ret_315 = vsetq_lane_s32(vgetq_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ +#define vcopy_lane_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ + int32x2_t __ret_315; \ + int32x2_t __s0_315 = __p0_315; \ + int32x2_t __s2_315 = __p2_315; \ + __ret_315 = vset_lane_s32(vget_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ __ret_315; \ }) #else -#define vcopyq_laneq_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ - int32x4_t __ret_316; \ - int32x4_t __s0_316 = __p0_316; \ - int32x4_t __s2_316 = __p2_316; \ - int32x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ - int32x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ - __ret_316 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ - __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \ +#define vcopy_lane_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ + int32x2_t __ret_316; \ + int32x2_t __s0_316 = __p0_316; \ + int32x2_t __s2_316 = __p2_316; \ + int32x2_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 1, 0); \ + int32x2_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 1, 0); \ + __ret_316 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ + __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 1, 0); \ __ret_316; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ - int64x2_t __ret_317; \ - int64x2_t __s0_317 = __p0_317; \ - int64x2_t __s2_317 = __p2_317; \ - __ret_317 = vsetq_lane_s64(vgetq_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ +#define vcopy_lane_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ + int64x1_t __ret_317; \ + int64x1_t __s0_317 = __p0_317; \ + int64x1_t __s2_317 = __p2_317; \ + __ret_317 = vset_lane_s64(vget_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ __ret_317; \ }) -#else -#define vcopyq_laneq_s64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ - int64x2_t __ret_318; \ - int64x2_t __s0_318 = __p0_318; \ - int64x2_t __s2_318 = __p2_318; \ - int64x2_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 1, 0); \ - int64x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ - __ret_318 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_318, __p3_318), __rev0_318, __p1_318); \ - __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 1, 0); \ +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s16(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ + int16x4_t __ret_318; \ + int16x4_t __s0_318 = __p0_318; \ + int16x4_t __s2_318 = __p2_318; \ + __ret_318 = vset_lane_s16(vget_lane_s16(__s2_318, __p3_318), __s0_318, __p1_318); \ __ret_318; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ - int16x8_t __ret_319; \ - int16x8_t __s0_319 = __p0_319; \ 
- int16x8_t __s2_319 = __p2_319; \ - __ret_319 = vsetq_lane_s16(vgetq_lane_s16(__s2_319, __p3_319), __s0_319, __p1_319); \ +#else +#define vcopy_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ + int16x4_t __ret_319; \ + int16x4_t __s0_319 = __p0_319; \ + int16x4_t __s2_319 = __p2_319; \ + int16x4_t __rev0_319; __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 3, 2, 1, 0); \ + int16x4_t __rev2_319; __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 3, 2, 1, 0); \ + __ret_319 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_319, __p3_319), __rev0_319, __p1_319); \ + __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 3, 2, 1, 0); \ __ret_319; \ }) -#else -#define vcopyq_laneq_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ - int16x8_t __ret_320; \ - int16x8_t __s0_320 = __p0_320; \ - int16x8_t __s2_320 = __p2_320; \ - int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_320 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_320, __p3_320), __rev0_320, __p1_320); \ - __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p8(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ + poly8x16_t __ret_320; \ + poly8x16_t __s0_320 = __p0_320; \ + poly8x16_t __s2_320 = __p2_320; \ + __ret_320 = vsetq_lane_p8(vgetq_lane_p8(__s2_320, __p3_320), __s0_320, __p1_320); \ __ret_320; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ - poly8x8_t __ret_321; \ - poly8x8_t __s0_321 = __p0_321; \ +#else +#define vcopyq_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ + poly8x16_t __ret_321; \ + poly8x16_t __s0_321 = __p0_321; \ poly8x16_t __s2_321 = __p2_321; \ - __ret_321 = vset_lane_p8(vgetq_lane_p8(__s2_321, __p3_321), __s0_321, __p1_321); \ + poly8x16_t __rev0_321; __rev0_321 = __builtin_shufflevector(__s0_321, __s0_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_321; __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_321 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_321, __p3_321), __rev0_321, __p1_321); \ + __ret_321 = __builtin_shufflevector(__ret_321, __ret_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_321; \ }) -#else -#define vcopy_laneq_p8(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ - poly8x8_t __ret_322; \ - poly8x8_t __s0_322 = __p0_322; \ - poly8x16_t __s2_322 = __p2_322; \ - poly8x8_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_322 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_322, __p3_322), __rev0_322, __p1_322); \ - __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p16(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ + poly16x8_t __ret_322; \ + poly16x8_t __s0_322 = __p0_322; \ + poly16x8_t __s2_322 = __p2_322; \ + __ret_322 = vsetq_lane_p16(vgetq_lane_p16(__s2_322, __p3_322), __s0_322, __p1_322); \ __ret_322; \ }) -#endif - 
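
All of the vcopy_lane_* and vcopyq_laneq_* macros being renumbered in this hunk expand to the same two-step idiom: read one lane out of the source vector, then write it into one lane of the destination, i.e. vset_lane(vget_lane(src, srclane), dst, dstlane). A minimal stand-in model of that behavior, using a plain struct instead of the real int16x4_t so it compiles and runs on any host — the model_* names are hypothetical illustrations, not part of the header:

    #include <stdio.h>

    /* hypothetical stand-in for int16x4_t (4 lanes of 16-bit ints) */
    typedef struct { short v[4]; } model_int16x4;

    static model_int16x4 model_vcopy_lane_s16(model_int16x4 dst, int dlane,
                                              model_int16x4 src, int slane) {
        /* the macro expands to:
           vset_lane_s16(vget_lane_s16(src, slane), dst, dlane) */
        dst.v[dlane] = src.v[slane];
        return dst;
    }

    int main(void) {
        model_int16x4 a = {{1, 2, 3, 4}};
        model_int16x4 b = {{10, 20, 30, 40}};
        /* copy lane 3 of b into lane 0 of a */
        model_int16x4 r = model_vcopy_lane_s16(a, 0, b, 3);
        printf("%d %d %d %d\n", r.v[0], r.v[1], r.v[2], r.v[3]); /* 40 2 3 4 */
        return 0;
    }

The only difference across the dozens of generated variants is the element type and lane count; vcopy_lane_* takes a 64-bit source, vcopy_laneq_*/vcopyq_laneq_* take a 128-bit one.
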
-#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ - poly16x4_t __ret_323; \ - poly16x4_t __s0_323 = __p0_323; \ +#else +#define vcopyq_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ + poly16x8_t __ret_323; \ + poly16x8_t __s0_323 = __p0_323; \ poly16x8_t __s2_323 = __p2_323; \ - __ret_323 = vset_lane_p16(vgetq_lane_p16(__s2_323, __p3_323), __s0_323, __p1_323); \ + poly16x8_t __rev0_323; __rev0_323 = __builtin_shufflevector(__s0_323, __s0_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev2_323; __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_323 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_323, __p3_323), __rev0_323, __p1_323); \ + __ret_323 = __builtin_shufflevector(__ret_323, __ret_323, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_323; \ }) -#else -#define vcopy_laneq_p16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ - poly16x4_t __ret_324; \ - poly16x4_t __s0_324 = __p0_324; \ - poly16x8_t __s2_324 = __p2_324; \ - poly16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \ - poly16x8_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_324 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_324, __p3_324), __rev0_324, __p1_324); \ - __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u8(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ + uint8x16_t __ret_324; \ + uint8x16_t __s0_324 = __p0_324; \ + uint8x16_t __s2_324 = __p2_324; \ + __ret_324 = vsetq_lane_u8(vgetq_lane_u8(__s2_324, __p3_324), __s0_324, __p1_324); \ __ret_324; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ - uint8x8_t __ret_325; \ - uint8x8_t __s0_325 = __p0_325; \ +#else +#define vcopyq_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ + uint8x16_t __ret_325; \ + uint8x16_t __s0_325 = __p0_325; \ uint8x16_t __s2_325 = __p2_325; \ - __ret_325 = vset_lane_u8(vgetq_lane_u8(__s2_325, __p3_325), __s0_325, __p1_325); \ + uint8x16_t __rev0_325; __rev0_325 = __builtin_shufflevector(__s0_325, __s0_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_325; __rev2_325 = __builtin_shufflevector(__s2_325, __s2_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_325 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_325, __p3_325), __rev0_325, __p1_325); \ + __ret_325 = __builtin_shufflevector(__ret_325, __ret_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_325; \ }) -#else -#define vcopy_laneq_u8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ - uint8x8_t __ret_326; \ - uint8x8_t __s0_326 = __p0_326; \ - uint8x16_t __s2_326 = __p2_326; \ - uint8x8_t __rev0_326; __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_326 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_326, __p3_326), __rev0_326, __p1_326); \ - __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ + uint32x4_t __ret_326; \ + uint32x4_t __s0_326 = __p0_326; 
\ + uint32x4_t __s2_326 = __p2_326; \ + __ret_326 = vsetq_lane_u32(vgetq_lane_u32(__s2_326, __p3_326), __s0_326, __p1_326); \ __ret_326; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ - uint32x2_t __ret_327; \ - uint32x2_t __s0_327 = __p0_327; \ +#else +#define vcopyq_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ + uint32x4_t __ret_327; \ + uint32x4_t __s0_327 = __p0_327; \ uint32x4_t __s2_327 = __p2_327; \ - __ret_327 = vset_lane_u32(vgetq_lane_u32(__s2_327, __p3_327), __s0_327, __p1_327); \ + uint32x4_t __rev0_327; __rev0_327 = __builtin_shufflevector(__s0_327, __s0_327, 3, 2, 1, 0); \ + uint32x4_t __rev2_327; __rev2_327 = __builtin_shufflevector(__s2_327, __s2_327, 3, 2, 1, 0); \ + __ret_327 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_327, __p3_327), __rev0_327, __p1_327); \ + __ret_327 = __builtin_shufflevector(__ret_327, __ret_327, 3, 2, 1, 0); \ __ret_327; \ }) -#else -#define vcopy_laneq_u32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ - uint32x2_t __ret_328; \ - uint32x2_t __s0_328 = __p0_328; \ - uint32x4_t __s2_328 = __p2_328; \ - uint32x2_t __rev0_328; __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 1, 0); \ - uint32x4_t __rev2_328; __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \ - __ret_328 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_328, __p3_328), __rev0_328, __p1_328); \ - __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u64(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ + uint64x2_t __ret_328; \ + uint64x2_t __s0_328 = __p0_328; \ + uint64x2_t __s2_328 = __p2_328; \ + __ret_328 = vsetq_lane_u64(vgetq_lane_u64(__s2_328, __p3_328), __s0_328, __p1_328); \ __ret_328; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ - uint64x1_t __ret_329; \ - uint64x1_t __s0_329 = __p0_329; \ +#else +#define vcopyq_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ + uint64x2_t __ret_329; \ + uint64x2_t __s0_329 = __p0_329; \ uint64x2_t __s2_329 = __p2_329; \ - __ret_329 = vset_lane_u64(vgetq_lane_u64(__s2_329, __p3_329), __s0_329, __p1_329); \ + uint64x2_t __rev0_329; __rev0_329 = __builtin_shufflevector(__s0_329, __s0_329, 1, 0); \ + uint64x2_t __rev2_329; __rev2_329 = __builtin_shufflevector(__s2_329, __s2_329, 1, 0); \ + __ret_329 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_329, __p3_329), __rev0_329, __p1_329); \ + __ret_329 = __builtin_shufflevector(__ret_329, __ret_329, 1, 0); \ __ret_329; \ }) -#else -#define vcopy_laneq_u64(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ - uint64x1_t __ret_330; \ - uint64x1_t __s0_330 = __p0_330; \ - uint64x2_t __s2_330 = __p2_330; \ - uint64x2_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 1, 0); \ - __ret_330 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_330, __p3_330), __s0_330, __p1_330); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u16(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ + uint16x8_t __ret_330; \ + uint16x8_t __s0_330 = __p0_330; \ + uint16x8_t __s2_330 = __p2_330; \ + __ret_330 = vsetq_lane_u16(vgetq_lane_u16(__s2_330, __p3_330), __s0_330, __p1_330); \ __ret_330; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ - uint16x4_t __ret_331; \ - 
uint16x4_t __s0_331 = __p0_331; \ +#else +#define vcopyq_laneq_u16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ + uint16x8_t __ret_331; \ + uint16x8_t __s0_331 = __p0_331; \ uint16x8_t __s2_331 = __p2_331; \ - __ret_331 = vset_lane_u16(vgetq_lane_u16(__s2_331, __p3_331), __s0_331, __p1_331); \ + uint16x8_t __rev0_331; __rev0_331 = __builtin_shufflevector(__s0_331, __s0_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_331; __rev2_331 = __builtin_shufflevector(__s2_331, __s2_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_331 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_331, __p3_331), __rev0_331, __p1_331); \ + __ret_331 = __builtin_shufflevector(__ret_331, __ret_331, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_331; \ }) -#else -#define vcopy_laneq_u16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ - uint16x4_t __ret_332; \ - uint16x4_t __s0_332 = __p0_332; \ - uint16x8_t __s2_332 = __p2_332; \ - uint16x4_t __rev0_332; __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \ - uint16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_332 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_332, __p3_332), __rev0_332, __p1_332); \ - __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s8(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ + int8x16_t __ret_332; \ + int8x16_t __s0_332 = __p0_332; \ + int8x16_t __s2_332 = __p2_332; \ + __ret_332 = vsetq_lane_s8(vgetq_lane_s8(__s2_332, __p3_332), __s0_332, __p1_332); \ __ret_332; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ - int8x8_t __ret_333; \ - int8x8_t __s0_333 = __p0_333; \ +#else +#define vcopyq_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ + int8x16_t __ret_333; \ + int8x16_t __s0_333 = __p0_333; \ int8x16_t __s2_333 = __p2_333; \ - __ret_333 = vset_lane_s8(vgetq_lane_s8(__s2_333, __p3_333), __s0_333, __p1_333); \ + int8x16_t __rev0_333; __rev0_333 = __builtin_shufflevector(__s0_333, __s0_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_333; __rev2_333 = __builtin_shufflevector(__s2_333, __s2_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_333 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_333, __p3_333), __rev0_333, __p1_333); \ + __ret_333 = __builtin_shufflevector(__ret_333, __ret_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_333; \ }) -#else -#define vcopy_laneq_s8(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ - int8x8_t __ret_334; \ - int8x8_t __s0_334 = __p0_334; \ - int8x16_t __s2_334 = __p2_334; \ - int8x8_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_334 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_334, __p3_334), __rev0_334, __p1_334); \ - __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ + float32x4_t __ret_334; \ + float32x4_t __s0_334 = __p0_334; \ + float32x4_t __s2_334 = __p2_334; \ + __ret_334 = vsetq_lane_f32(vgetq_lane_f32(__s2_334, __p3_334), __s0_334, __p1_334); \ __ret_334; \ }) -#endif 
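
On the #else (big-endian) side of each conditional above, the generated code reverses lane order with __builtin_shufflevector before calling the __noswap_ helpers and reverses the result back, so lane indices keep their little-endian meaning regardless of register layout. A short sketch of that reverse/operate/reverse idiom using the generic vector extension — assumes a Clang (or GCC 12+) host, no NEON required:

    /* 4 lanes of 16-bit ints via the compiler's vector extension */
    typedef short s16x4 __attribute__((vector_size(8)));

    static s16x4 reverse_s16x4(s16x4 x) {
        /* same constant mask the header emits for 4-lane vectors: 3, 2, 1, 0 */
        return __builtin_shufflevector(x, x, 3, 2, 1, 0);
    }

    int main(void) {
        s16x4 a = {1, 2, 3, 4};
        s16x4 rev = reverse_s16x4(a);    /* {4, 3, 2, 1} */
        s16x4 back = reverse_s16x4(rev); /* round-trips to {1, 2, 3, 4} */
        return !(rev[0] == 4 && back[0] == 1 && back[3] == 4);
    }

Applying the reversal to both inputs and to the result is what lets the little-endian and big-endian branches of each macro return bit-identical vectors for the same lane arguments.
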
- -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ - float32x2_t __ret_335; \ - float32x2_t __s0_335 = __p0_335; \ +#else +#define vcopyq_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ + float32x4_t __ret_335; \ + float32x4_t __s0_335 = __p0_335; \ float32x4_t __s2_335 = __p2_335; \ - __ret_335 = vset_lane_f32(vgetq_lane_f32(__s2_335, __p3_335), __s0_335, __p1_335); \ + float32x4_t __rev0_335; __rev0_335 = __builtin_shufflevector(__s0_335, __s0_335, 3, 2, 1, 0); \ + float32x4_t __rev2_335; __rev2_335 = __builtin_shufflevector(__s2_335, __s2_335, 3, 2, 1, 0); \ + __ret_335 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_335, __p3_335), __rev0_335, __p1_335); \ + __ret_335 = __builtin_shufflevector(__ret_335, __ret_335, 3, 2, 1, 0); \ __ret_335; \ }) -#else -#define vcopy_laneq_f32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ - float32x2_t __ret_336; \ - float32x2_t __s0_336 = __p0_336; \ - float32x4_t __s2_336 = __p2_336; \ - float32x2_t __rev0_336; __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 1, 0); \ - float32x4_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 3, 2, 1, 0); \ - __ret_336 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_336, __p3_336), __rev0_336, __p1_336); \ - __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ + int32x4_t __ret_336; \ + int32x4_t __s0_336 = __p0_336; \ + int32x4_t __s2_336 = __p2_336; \ + __ret_336 = vsetq_lane_s32(vgetq_lane_s32(__s2_336, __p3_336), __s0_336, __p1_336); \ __ret_336; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ - int32x2_t __ret_337; \ - int32x2_t __s0_337 = __p0_337; \ +#else +#define vcopyq_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ + int32x4_t __ret_337; \ + int32x4_t __s0_337 = __p0_337; \ int32x4_t __s2_337 = __p2_337; \ - __ret_337 = vset_lane_s32(vgetq_lane_s32(__s2_337, __p3_337), __s0_337, __p1_337); \ + int32x4_t __rev0_337; __rev0_337 = __builtin_shufflevector(__s0_337, __s0_337, 3, 2, 1, 0); \ + int32x4_t __rev2_337; __rev2_337 = __builtin_shufflevector(__s2_337, __s2_337, 3, 2, 1, 0); \ + __ret_337 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_337, __p3_337), __rev0_337, __p1_337); \ + __ret_337 = __builtin_shufflevector(__ret_337, __ret_337, 3, 2, 1, 0); \ __ret_337; \ }) -#else -#define vcopy_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ - int32x2_t __ret_338; \ - int32x2_t __s0_338 = __p0_338; \ - int32x4_t __s2_338 = __p2_338; \ - int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ - int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ - __ret_338 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_338, __p3_338), __rev0_338, __p1_338); \ - __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s64(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ + int64x2_t __ret_338; \ + int64x2_t __s0_338 = __p0_338; \ + int64x2_t __s2_338 = __p2_338; \ + __ret_338 = vsetq_lane_s64(vgetq_lane_s64(__s2_338, __p3_338), __s0_338, __p1_338); \ __ret_338; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) 
__extension__ ({ \ - int64x1_t __ret_339; \ - int64x1_t __s0_339 = __p0_339; \ +#else +#define vcopyq_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ + int64x2_t __ret_339; \ + int64x2_t __s0_339 = __p0_339; \ int64x2_t __s2_339 = __p2_339; \ - __ret_339 = vset_lane_s64(vgetq_lane_s64(__s2_339, __p3_339), __s0_339, __p1_339); \ + int64x2_t __rev0_339; __rev0_339 = __builtin_shufflevector(__s0_339, __s0_339, 1, 0); \ + int64x2_t __rev2_339; __rev2_339 = __builtin_shufflevector(__s2_339, __s2_339, 1, 0); \ + __ret_339 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_339, __p3_339), __rev0_339, __p1_339); \ + __ret_339 = __builtin_shufflevector(__ret_339, __ret_339, 1, 0); \ __ret_339; \ }) -#else -#define vcopy_laneq_s64(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ - int64x1_t __ret_340; \ - int64x1_t __s0_340 = __p0_340; \ - int64x2_t __s2_340 = __p2_340; \ - int64x2_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 1, 0); \ - __ret_340 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_340, __p3_340), __s0_340, __p1_340); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ + int16x8_t __ret_340; \ + int16x8_t __s0_340 = __p0_340; \ + int16x8_t __s2_340 = __p2_340; \ + __ret_340 = vsetq_lane_s16(vgetq_lane_s16(__s2_340, __p3_340), __s0_340, __p1_340); \ __ret_340; \ }) +#else +#define vcopyq_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ + int16x8_t __ret_341; \ + int16x8_t __s0_341 = __p0_341; \ + int16x8_t __s2_341 = __p2_341; \ + int16x8_t __rev0_341; __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_341; __rev2_341 = __builtin_shufflevector(__s2_341, __s2_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_341 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_341, __p3_341), __rev0_341, __p1_341); \ + __ret_341 = __builtin_shufflevector(__ret_341, __ret_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_341; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ - int16x4_t __ret_341; \ - int16x4_t __s0_341 = __p0_341; \ - int16x8_t __s2_341 = __p2_341; \ - __ret_341 = vset_lane_s16(vgetq_lane_s16(__s2_341, __p3_341), __s0_341, __p1_341); \ - __ret_341; \ +#define vcopy_laneq_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ + poly8x8_t __ret_342; \ + poly8x8_t __s0_342 = __p0_342; \ + poly8x16_t __s2_342 = __p2_342; \ + __ret_342 = vset_lane_p8(vgetq_lane_p8(__s2_342, __p3_342), __s0_342, __p1_342); \ + __ret_342; \ }) #else -#define vcopy_laneq_s16(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ - int16x4_t __ret_342; \ - int16x4_t __s0_342 = __p0_342; \ - int16x8_t __s2_342 = __p2_342; \ - int16x4_t __rev0_342; __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 3, 2, 1, 0); \ - int16x8_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_342 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_342, __p3_342), __rev0_342, __p1_342); \ - __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 3, 2, 1, 0); \ - __ret_342; \ +#define vcopy_laneq_p8(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \ + poly8x8_t __ret_343; \ + poly8x8_t __s0_343 = __p0_343; \ + poly8x16_t __s2_343 = __p2_343; \ + poly8x8_t __rev0_343; __rev0_343 = __builtin_shufflevector(__s0_343, __s0_343, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_343; __rev2_343 = 
__builtin_shufflevector(__s2_343, __s2_343, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_343 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_343, __p3_343), __rev0_343, __p1_343); \ + __ret_343 = __builtin_shufflevector(__ret_343, __ret_343, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_343; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \ + poly16x4_t __ret_344; \ + poly16x4_t __s0_344 = __p0_344; \ + poly16x8_t __s2_344 = __p2_344; \ + __ret_344 = vset_lane_p16(vgetq_lane_p16(__s2_344, __p3_344), __s0_344, __p1_344); \ + __ret_344; \ +}) +#else +#define vcopy_laneq_p16(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \ + poly16x4_t __ret_345; \ + poly16x4_t __s0_345 = __p0_345; \ + poly16x8_t __s2_345 = __p2_345; \ + poly16x4_t __rev0_345; __rev0_345 = __builtin_shufflevector(__s0_345, __s0_345, 3, 2, 1, 0); \ + poly16x8_t __rev2_345; __rev2_345 = __builtin_shufflevector(__s2_345, __s2_345, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_345 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_345, __p3_345), __rev0_345, __p1_345); \ + __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 3, 2, 1, 0); \ + __ret_345; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \ + uint8x8_t __ret_346; \ + uint8x8_t __s0_346 = __p0_346; \ + uint8x16_t __s2_346 = __p2_346; \ + __ret_346 = vset_lane_u8(vgetq_lane_u8(__s2_346, __p3_346), __s0_346, __p1_346); \ + __ret_346; \ +}) +#else +#define vcopy_laneq_u8(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \ + uint8x8_t __ret_347; \ + uint8x8_t __s0_347 = __p0_347; \ + uint8x16_t __s2_347 = __p2_347; \ + uint8x8_t __rev0_347; __rev0_347 = __builtin_shufflevector(__s0_347, __s0_347, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_347; __rev2_347 = __builtin_shufflevector(__s2_347, __s2_347, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_347 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_347, __p3_347), __rev0_347, __p1_347); \ + __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_347; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \ + uint32x2_t __ret_348; \ + uint32x2_t __s0_348 = __p0_348; \ + uint32x4_t __s2_348 = __p2_348; \ + __ret_348 = vset_lane_u32(vgetq_lane_u32(__s2_348, __p3_348), __s0_348, __p1_348); \ + __ret_348; \ +}) +#else +#define vcopy_laneq_u32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \ + uint32x2_t __ret_349; \ + uint32x2_t __s0_349 = __p0_349; \ + uint32x4_t __s2_349 = __p2_349; \ + uint32x2_t __rev0_349; __rev0_349 = __builtin_shufflevector(__s0_349, __s0_349, 1, 0); \ + uint32x4_t __rev2_349; __rev2_349 = __builtin_shufflevector(__s2_349, __s2_349, 3, 2, 1, 0); \ + __ret_349 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_349, __p3_349), __rev0_349, __p1_349); \ + __ret_349 = __builtin_shufflevector(__ret_349, __ret_349, 1, 0); \ + __ret_349; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \ + uint64x1_t __ret_350; \ + uint64x1_t __s0_350 = __p0_350; \ + uint64x2_t __s2_350 = __p2_350; \ + __ret_350 = vset_lane_u64(vgetq_lane_u64(__s2_350, __p3_350), __s0_350, __p1_350); \ + __ret_350; \ +}) +#else +#define vcopy_laneq_u64(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \ + uint64x1_t __ret_351; \ + uint64x1_t 
__s0_351 = __p0_351; \ + uint64x2_t __s2_351 = __p2_351; \ + uint64x2_t __rev2_351; __rev2_351 = __builtin_shufflevector(__s2_351, __s2_351, 1, 0); \ + __ret_351 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_351, __p3_351), __s0_351, __p1_351); \ + __ret_351; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \ + uint16x4_t __ret_352; \ + uint16x4_t __s0_352 = __p0_352; \ + uint16x8_t __s2_352 = __p2_352; \ + __ret_352 = vset_lane_u16(vgetq_lane_u16(__s2_352, __p3_352), __s0_352, __p1_352); \ + __ret_352; \ +}) +#else +#define vcopy_laneq_u16(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \ + uint16x4_t __ret_353; \ + uint16x4_t __s0_353 = __p0_353; \ + uint16x8_t __s2_353 = __p2_353; \ + uint16x4_t __rev0_353; __rev0_353 = __builtin_shufflevector(__s0_353, __s0_353, 3, 2, 1, 0); \ + uint16x8_t __rev2_353; __rev2_353 = __builtin_shufflevector(__s2_353, __s2_353, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_353 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_353, __p3_353), __rev0_353, __p1_353); \ + __ret_353 = __builtin_shufflevector(__ret_353, __ret_353, 3, 2, 1, 0); \ + __ret_353; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \ + int8x8_t __ret_354; \ + int8x8_t __s0_354 = __p0_354; \ + int8x16_t __s2_354 = __p2_354; \ + __ret_354 = vset_lane_s8(vgetq_lane_s8(__s2_354, __p3_354), __s0_354, __p1_354); \ + __ret_354; \ +}) +#else +#define vcopy_laneq_s8(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \ + int8x8_t __ret_355; \ + int8x8_t __s0_355 = __p0_355; \ + int8x16_t __s2_355 = __p2_355; \ + int8x8_t __rev0_355; __rev0_355 = __builtin_shufflevector(__s0_355, __s0_355, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_355; __rev2_355 = __builtin_shufflevector(__s2_355, __s2_355, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_355 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_355, __p3_355), __rev0_355, __p1_355); \ + __ret_355 = __builtin_shufflevector(__ret_355, __ret_355, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_355; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \ + float32x2_t __ret_356; \ + float32x2_t __s0_356 = __p0_356; \ + float32x4_t __s2_356 = __p2_356; \ + __ret_356 = vset_lane_f32(vgetq_lane_f32(__s2_356, __p3_356), __s0_356, __p1_356); \ + __ret_356; \ +}) +#else +#define vcopy_laneq_f32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \ + float32x2_t __ret_357; \ + float32x2_t __s0_357 = __p0_357; \ + float32x4_t __s2_357 = __p2_357; \ + float32x2_t __rev0_357; __rev0_357 = __builtin_shufflevector(__s0_357, __s0_357, 1, 0); \ + float32x4_t __rev2_357; __rev2_357 = __builtin_shufflevector(__s2_357, __s2_357, 3, 2, 1, 0); \ + __ret_357 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_357, __p3_357), __rev0_357, __p1_357); \ + __ret_357 = __builtin_shufflevector(__ret_357, __ret_357, 1, 0); \ + __ret_357; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \ + int32x2_t __ret_358; \ + int32x2_t __s0_358 = __p0_358; \ + int32x4_t __s2_358 = __p2_358; \ + __ret_358 = vset_lane_s32(vgetq_lane_s32(__s2_358, __p3_358), __s0_358, __p1_358); \ + __ret_358; \ +}) +#else +#define vcopy_laneq_s32(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \ + int32x2_t __ret_359; \ + int32x2_t __s0_359 = __p0_359; \ + int32x4_t __s2_359 = 
__p2_359; \ + int32x2_t __rev0_359; __rev0_359 = __builtin_shufflevector(__s0_359, __s0_359, 1, 0); \ + int32x4_t __rev2_359; __rev2_359 = __builtin_shufflevector(__s2_359, __s2_359, 3, 2, 1, 0); \ + __ret_359 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_359, __p3_359), __rev0_359, __p1_359); \ + __ret_359 = __builtin_shufflevector(__ret_359, __ret_359, 1, 0); \ + __ret_359; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \ + int64x1_t __ret_360; \ + int64x1_t __s0_360 = __p0_360; \ + int64x2_t __s2_360 = __p2_360; \ + __ret_360 = vset_lane_s64(vgetq_lane_s64(__s2_360, __p3_360), __s0_360, __p1_360); \ + __ret_360; \ +}) +#else +#define vcopy_laneq_s64(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \ + int64x1_t __ret_361; \ + int64x1_t __s0_361 = __p0_361; \ + int64x2_t __s2_361 = __p2_361; \ + int64x2_t __rev2_361; __rev2_361 = __builtin_shufflevector(__s2_361, __s2_361, 1, 0); \ + __ret_361 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_361, __p3_361), __s0_361, __p1_361); \ + __ret_361; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \ + int16x4_t __ret_362; \ + int16x4_t __s0_362 = __p0_362; \ + int16x8_t __s2_362 = __p2_362; \ + __ret_362 = vset_lane_s16(vgetq_lane_s16(__s2_362, __p3_362), __s0_362, __p1_362); \ + __ret_362; \ +}) +#else +#define vcopy_laneq_s16(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \ + int16x4_t __ret_363; \ + int16x4_t __s0_363 = __p0_363; \ + int16x8_t __s2_363 = __p2_363; \ + int16x4_t __rev0_363; __rev0_363 = __builtin_shufflevector(__s0_363, __s0_363, 3, 2, 1, 0); \ + int16x8_t __rev2_363; __rev2_363 = __builtin_shufflevector(__s2_363, __s2_363, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_363 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_363, __p3_363), __rev0_363, __p1_363); \ + __ret_363 = __builtin_shufflevector(__ret_363, __ret_363, 3, 2, 1, 0); \ + __ret_363; \ }) #endif @@ -44721,55 +46433,55 @@ __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { __ret = (float64x1_t)(__promote); \ __ret; \ }) -__ai float32_t vcvts_f32_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) float32_t vcvts_f32_s32(int32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); return __ret; } -__ai float32_t vcvts_f32_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) float32_t vcvts_f32_u32(uint32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); return __ret; } #else -__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) { float32x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); return __ret; } #endif -__ai float64_t vcvtd_f64_s64(int64_t __p0) { +__ai 
__attribute__((target("neon"))) float64_t vcvtd_f64_s64(int64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); return __ret; } -__ai float64_t vcvtd_f64_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) float64_t vcvtd_f64_u64(uint64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); @@ -44779,13 +46491,13 @@ __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) { float64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); @@ -44794,31 +46506,31 @@ __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { } #endif -__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_u64(uint64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); return __ret; } -__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_s64(int64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); return __ret; } #else -__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) { float64x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); return __ret; @@ -44826,13 +46538,13 @@ __ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { float16x8_t __ret; __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); return __ret; } #else -__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t 
vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { float16x8_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -44843,13 +46555,13 @@ __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { float32x4_t __ret; __ret = vcvt_f32_f16(vget_high_f16(__p0)); return __ret; } #else -__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { float32x4_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); @@ -44859,13 +46571,13 @@ __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); return __ret; } #else -__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -44876,13 +46588,13 @@ __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { float64x2_t __ret; __ret = vcvt_f64_f32(vget_high_f32(__p0)); return __ret; } #else -__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { float64x2_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); @@ -45035,24 +46747,24 @@ __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ __ret; \ }) -__ai int32_t vcvts_s32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vcvts_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); return __ret; } -__ai int64_t vcvtd_s64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vcvtd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); @@ -45061,29 +46773,29 @@ __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { } #endif -__ai int64x1_t vcvt_s64_f64(float64x1_t 
__p0) { +__ai __attribute__((target("neon"))) int64x1_t vcvt_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); return __ret; } -__ai uint32_t vcvts_u32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcvts_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); return __ret; } -__ai uint64_t vcvtd_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcvtd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); @@ -45092,24 +46804,24 @@ __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { } #endif -__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcvt_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); return __ret; } -__ai int32_t vcvtas_s32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vcvtas_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); @@ -45118,29 +46830,29 @@ __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { } #endif -__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vcvta_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); return __ret; } -__ai int64_t vcvtad_s64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vcvtad_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); return __ret; } -__ai uint32_t vcvtas_u32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcvtas_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) 
__builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); @@ -45149,29 +46861,29 @@ __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { } #endif -__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcvta_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); return __ret; } -__ai uint64_t vcvtad_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcvtad_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); return __ret; } -__ai int32_t vcvtms_s32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vcvtms_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); @@ -45180,29 +46892,29 @@ __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { } #endif -__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vcvtm_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); return __ret; } -__ai int64_t vcvtmd_s64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vcvtmd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); return __ret; } -__ai uint32_t vcvtms_u32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcvtms_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); @@ -45211,29 +46923,29 @@ __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { } #endif -__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); return __ret; } -__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcvtmd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); return __ret; } -__ai int32_t vcvtns_s32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vcvtns_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) 
int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); @@ -45242,29 +46954,29 @@ __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { } #endif -__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vcvtn_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); return __ret; } -__ai int64_t vcvtnd_s64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vcvtnd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); return __ret; } -__ai uint32_t vcvtns_u32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcvtns_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); @@ -45273,29 +46985,29 @@ __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { } #endif -__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); return __ret; } -__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcvtnd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); return __ret; } -__ai int32_t vcvtps_s32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vcvtps_s32_f32(float32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); @@ -45304,29 +47016,29 @@ __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { } #endif -__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vcvtp_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); return __ret; } -__ai int64_t vcvtpd_s64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vcvtpd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) 
__builtin_neon_vcvtpd_s64_f64(__p0); return __ret; } -__ai uint32_t vcvtps_u32_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vcvtps_u32_f32(float32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); @@ -45335,36 +47047,36 @@ __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { } #endif -__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); return __ret; } -__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcvtpd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); return __ret; } -__ai float32_t vcvtxd_f32_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float32_t vcvtxd_f32_f64(float64_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); return __ret; } #else -__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) { float32x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); return __ret; @@ -45372,13 +47084,13 @@ __ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); return __ret; } #else -__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { float32x4_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -45389,13 +47101,13 @@ __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret 
= __p0 / __p1; return __ret; } #else -__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -45406,13 +47118,13 @@ __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __p0 / __p1; return __ret; } #else -__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -45422,19 +47134,19 @@ __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { } #endif -__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 / __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __p0 / __p1; return __ret; } #else -__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -45615,51 +47327,68 @@ __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { }) #endif -#define vdup_lane_p64(__p0_343, __p1_343) __extension__ ({ \ - poly64x1_t __ret_343; \ - poly64x1_t __s0_343 = __p0_343; \ - __ret_343 = splat_lane_p64(__s0_343, __p1_343); \ - __ret_343; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p64(__p0_344, __p1_344) __extension__ ({ \ - poly64x2_t __ret_344; \ - poly64x1_t __s0_344 = __p0_344; \ - __ret_344 = splatq_lane_p64(__s0_344, __p1_344); \ - __ret_344; \ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ + __ret; \ }) #else -#define vdupq_lane_p64(__p0_345, __p1_345) __extension__ ({ \ - poly64x2_t __ret_345; \ - poly64x1_t __s0_345 = __p0_345; \ - __ret_345 = __noswap_splatq_lane_p64(__s0_345, __p1_345); \ - __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 1, 0); \ - __ret_345; \ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdup_lane_p64(__p0_364, __p1_364) __extension__ ({ \ + poly64x1_t __ret_364; \ + poly64x1_t __s0_364 = __p0_364; \ + __ret_364 = splat_lane_p64(__s0_364, __p1_364); \ + __ret_364; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p64(__p0_365, __p1_365) __extension__ ({ \ + poly64x2_t __ret_365; \ + poly64x1_t 
__s0_365 = __p0_365; \ + __ret_365 = splatq_lane_p64(__s0_365, __p1_365); \ + __ret_365; \ +}) +#else +#define vdupq_lane_p64(__p0_366, __p1_366) __extension__ ({ \ + poly64x2_t __ret_366; \ + poly64x1_t __s0_366 = __p0_366; \ + __ret_366 = __noswap_splatq_lane_p64(__s0_366, __p1_366); \ + __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ + __ret_366; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f64(__p0_346, __p1_346) __extension__ ({ \ - float64x2_t __ret_346; \ - float64x1_t __s0_346 = __p0_346; \ - __ret_346 = splatq_lane_f64(__s0_346, __p1_346); \ - __ret_346; \ +#define vdupq_lane_f64(__p0_367, __p1_367) __extension__ ({ \ + float64x2_t __ret_367; \ + float64x1_t __s0_367 = __p0_367; \ + __ret_367 = splatq_lane_f64(__s0_367, __p1_367); \ + __ret_367; \ }) #else -#define vdupq_lane_f64(__p0_347, __p1_347) __extension__ ({ \ - float64x2_t __ret_347; \ - float64x1_t __s0_347 = __p0_347; \ - __ret_347 = __noswap_splatq_lane_f64(__s0_347, __p1_347); \ - __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 1, 0); \ - __ret_347; \ +#define vdupq_lane_f64(__p0_368, __p1_368) __extension__ ({ \ + float64x2_t __ret_368; \ + float64x1_t __s0_368 = __p0_368; \ + __ret_368 = __noswap_splatq_lane_f64(__s0_368, __p1_368); \ + __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 1, 0); \ + __ret_368; \ }) #endif -#define vdup_lane_f64(__p0_348, __p1_348) __extension__ ({ \ - float64x1_t __ret_348; \ - float64x1_t __s0_348 = __p0_348; \ - __ret_348 = splat_lane_f64(__s0_348, __p1_348); \ - __ret_348; \ +#define vdup_lane_f64(__p0_369, __p1_369) __extension__ ({ \ + float64x1_t __ret_369; \ + float64x1_t __s0_369 = __p0_369; \ + __ret_369 = splat_lane_f64(__s0_369, __p1_369); \ + __ret_369; \ }) #ifdef __LITTLE_ENDIAN__ #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ @@ -45866,518 +47595,535 @@ __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p8(__p0_349, __p1_349) __extension__ ({ \ - poly8x8_t __ret_349; \ - poly8x16_t __s0_349 = __p0_349; \ - __ret_349 = splat_laneq_p8(__s0_349, __p1_349); \ - __ret_349; \ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ + __ret; \ }) #else -#define vdup_laneq_p8(__p0_350, __p1_350) __extension__ ({ \ - poly8x8_t __ret_350; \ - poly8x16_t __s0_350 = __p0_350; \ - poly8x16_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_350 = __noswap_splat_laneq_p8(__rev0_350, __p1_350); \ - __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_350; \ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p64(__p0_351, __p1_351) __extension__ ({ \ - poly64x1_t __ret_351; \ - poly64x2_t __s0_351 = __p0_351; \ - __ret_351 = splat_laneq_p64(__s0_351, __p1_351); \ - __ret_351; \ -}) -#else -#define vdup_laneq_p64(__p0_352, __p1_352) __extension__ ({ \ - poly64x1_t __ret_352; \ - poly64x2_t __s0_352 = __p0_352; \ - poly64x2_t __rev0_352; __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 1, 0); \ - __ret_352 = 
__noswap_splat_laneq_p64(__rev0_352, __p1_352); \ - __ret_352; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p16(__p0_353, __p1_353) __extension__ ({ \ - poly16x4_t __ret_353; \ - poly16x8_t __s0_353 = __p0_353; \ - __ret_353 = splat_laneq_p16(__s0_353, __p1_353); \ - __ret_353; \ -}) -#else -#define vdup_laneq_p16(__p0_354, __p1_354) __extension__ ({ \ - poly16x4_t __ret_354; \ - poly16x8_t __s0_354 = __p0_354; \ - poly16x8_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_354 = __noswap_splat_laneq_p16(__rev0_354, __p1_354); \ - __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 3, 2, 1, 0); \ - __ret_354; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p8(__p0_355, __p1_355) __extension__ ({ \ - poly8x16_t __ret_355; \ - poly8x16_t __s0_355 = __p0_355; \ - __ret_355 = splatq_laneq_p8(__s0_355, __p1_355); \ - __ret_355; \ -}) -#else -#define vdupq_laneq_p8(__p0_356, __p1_356) __extension__ ({ \ - poly8x16_t __ret_356; \ - poly8x16_t __s0_356 = __p0_356; \ - poly8x16_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_356 = __noswap_splatq_laneq_p8(__rev0_356, __p1_356); \ - __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_356; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p64(__p0_357, __p1_357) __extension__ ({ \ - poly64x2_t __ret_357; \ - poly64x2_t __s0_357 = __p0_357; \ - __ret_357 = splatq_laneq_p64(__s0_357, __p1_357); \ - __ret_357; \ -}) -#else -#define vdupq_laneq_p64(__p0_358, __p1_358) __extension__ ({ \ - poly64x2_t __ret_358; \ - poly64x2_t __s0_358 = __p0_358; \ - poly64x2_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 1, 0); \ - __ret_358 = __noswap_splatq_laneq_p64(__rev0_358, __p1_358); \ - __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 1, 0); \ - __ret_358; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p16(__p0_359, __p1_359) __extension__ ({ \ - poly16x8_t __ret_359; \ - poly16x8_t __s0_359 = __p0_359; \ - __ret_359 = splatq_laneq_p16(__s0_359, __p1_359); \ - __ret_359; \ -}) -#else -#define vdupq_laneq_p16(__p0_360, __p1_360) __extension__ ({ \ - poly16x8_t __ret_360; \ - poly16x8_t __s0_360 = __p0_360; \ - poly16x8_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_360 = __noswap_splatq_laneq_p16(__rev0_360, __p1_360); \ - __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_360; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u8(__p0_361, __p1_361) __extension__ ({ \ - uint8x16_t __ret_361; \ - uint8x16_t __s0_361 = __p0_361; \ - __ret_361 = splatq_laneq_u8(__s0_361, __p1_361); \ - __ret_361; \ -}) -#else -#define vdupq_laneq_u8(__p0_362, __p1_362) __extension__ ({ \ - uint8x16_t __ret_362; \ - uint8x16_t __s0_362 = __p0_362; \ - uint8x16_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_362 = __noswap_splatq_laneq_u8(__rev0_362, __p1_362); \ - __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_362; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u32(__p0_363, __p1_363) __extension__ ({ \ - uint32x4_t __ret_363; \ - uint32x4_t __s0_363 = __p0_363; \ - __ret_363 = 
splatq_laneq_u32(__s0_363, __p1_363); \ - __ret_363; \ -}) -#else -#define vdupq_laneq_u32(__p0_364, __p1_364) __extension__ ({ \ - uint32x4_t __ret_364; \ - uint32x4_t __s0_364 = __p0_364; \ - uint32x4_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \ - __ret_364 = __noswap_splatq_laneq_u32(__rev0_364, __p1_364); \ - __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \ - __ret_364; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u64(__p0_365, __p1_365) __extension__ ({ \ - uint64x2_t __ret_365; \ - uint64x2_t __s0_365 = __p0_365; \ - __ret_365 = splatq_laneq_u64(__s0_365, __p1_365); \ - __ret_365; \ -}) -#else -#define vdupq_laneq_u64(__p0_366, __p1_366) __extension__ ({ \ - uint64x2_t __ret_366; \ - uint64x2_t __s0_366 = __p0_366; \ - uint64x2_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \ - __ret_366 = __noswap_splatq_laneq_u64(__rev0_366, __p1_366); \ - __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ - __ret_366; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u16(__p0_367, __p1_367) __extension__ ({ \ - uint16x8_t __ret_367; \ - uint16x8_t __s0_367 = __p0_367; \ - __ret_367 = splatq_laneq_u16(__s0_367, __p1_367); \ - __ret_367; \ -}) -#else -#define vdupq_laneq_u16(__p0_368, __p1_368) __extension__ ({ \ - uint16x8_t __ret_368; \ - uint16x8_t __s0_368 = __p0_368; \ - uint16x8_t __rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_368 = __noswap_splatq_laneq_u16(__rev0_368, __p1_368); \ - __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_368; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s8(__p0_369, __p1_369) __extension__ ({ \ - int8x16_t __ret_369; \ - int8x16_t __s0_369 = __p0_369; \ - __ret_369 = splatq_laneq_s8(__s0_369, __p1_369); \ - __ret_369; \ -}) -#else -#define vdupq_laneq_s8(__p0_370, __p1_370) __extension__ ({ \ - int8x16_t __ret_370; \ - int8x16_t __s0_370 = __p0_370; \ - int8x16_t __rev0_370; __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_370 = __noswap_splatq_laneq_s8(__rev0_370, __p1_370); \ - __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdup_laneq_p8(__p0_370, __p1_370) __extension__ ({ \ + poly8x8_t __ret_370; \ + poly8x16_t __s0_370 = __p0_370; \ + __ret_370 = splat_laneq_p8(__s0_370, __p1_370); \ __ret_370; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f64(__p0_371, __p1_371) __extension__ ({ \ - float64x2_t __ret_371; \ - float64x2_t __s0_371 = __p0_371; \ - __ret_371 = splatq_laneq_f64(__s0_371, __p1_371); \ +#else +#define vdup_laneq_p8(__p0_371, __p1_371) __extension__ ({ \ + poly8x8_t __ret_371; \ + poly8x16_t __s0_371 = __p0_371; \ + poly8x16_t __rev0_371; __rev0_371 = __builtin_shufflevector(__s0_371, __s0_371, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_371 = __noswap_splat_laneq_p8(__rev0_371, __p1_371); \ + __ret_371 = __builtin_shufflevector(__ret_371, __ret_371, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_371; \ }) -#else -#define vdupq_laneq_f64(__p0_372, __p1_372) __extension__ ({ \ - float64x2_t __ret_372; \ - float64x2_t __s0_372 = __p0_372; \ - float64x2_t __rev0_372; __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 1, 0); \ - __ret_372 = __noswap_splatq_laneq_f64(__rev0_372, __p1_372); \ - __ret_372 = 
__builtin_shufflevector(__ret_372, __ret_372, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p64(__p0_372, __p1_372) __extension__ ({ \ + poly64x1_t __ret_372; \ + poly64x2_t __s0_372 = __p0_372; \ + __ret_372 = splat_laneq_p64(__s0_372, __p1_372); \ __ret_372; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f32(__p0_373, __p1_373) __extension__ ({ \ - float32x4_t __ret_373; \ - float32x4_t __s0_373 = __p0_373; \ - __ret_373 = splatq_laneq_f32(__s0_373, __p1_373); \ +#else +#define vdup_laneq_p64(__p0_373, __p1_373) __extension__ ({ \ + poly64x1_t __ret_373; \ + poly64x2_t __s0_373 = __p0_373; \ + poly64x2_t __rev0_373; __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 1, 0); \ + __ret_373 = __noswap_splat_laneq_p64(__rev0_373, __p1_373); \ __ret_373; \ }) -#else -#define vdupq_laneq_f32(__p0_374, __p1_374) __extension__ ({ \ - float32x4_t __ret_374; \ - float32x4_t __s0_374 = __p0_374; \ - float32x4_t __rev0_374; __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \ - __ret_374 = __noswap_splatq_laneq_f32(__rev0_374, __p1_374); \ - __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p16(__p0_374, __p1_374) __extension__ ({ \ + poly16x4_t __ret_374; \ + poly16x8_t __s0_374 = __p0_374; \ + __ret_374 = splat_laneq_p16(__s0_374, __p1_374); \ __ret_374; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f16(__p0_375, __p1_375) __extension__ ({ \ - float16x8_t __ret_375; \ - float16x8_t __s0_375 = __p0_375; \ - __ret_375 = splatq_laneq_f16(__s0_375, __p1_375); \ +#else +#define vdup_laneq_p16(__p0_375, __p1_375) __extension__ ({ \ + poly16x4_t __ret_375; \ + poly16x8_t __s0_375 = __p0_375; \ + poly16x8_t __rev0_375; __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_375 = __noswap_splat_laneq_p16(__rev0_375, __p1_375); \ + __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 3, 2, 1, 0); \ __ret_375; \ }) -#else -#define vdupq_laneq_f16(__p0_376, __p1_376) __extension__ ({ \ - float16x8_t __ret_376; \ - float16x8_t __s0_376 = __p0_376; \ - float16x8_t __rev0_376; __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_376 = __noswap_splatq_laneq_f16(__rev0_376, __p1_376); \ - __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p8(__p0_376, __p1_376) __extension__ ({ \ + poly8x16_t __ret_376; \ + poly8x16_t __s0_376 = __p0_376; \ + __ret_376 = splatq_laneq_p8(__s0_376, __p1_376); \ __ret_376; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s32(__p0_377, __p1_377) __extension__ ({ \ - int32x4_t __ret_377; \ - int32x4_t __s0_377 = __p0_377; \ - __ret_377 = splatq_laneq_s32(__s0_377, __p1_377); \ +#else +#define vdupq_laneq_p8(__p0_377, __p1_377) __extension__ ({ \ + poly8x16_t __ret_377; \ + poly8x16_t __s0_377 = __p0_377; \ + poly8x16_t __rev0_377; __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_377 = __noswap_splatq_laneq_p8(__rev0_377, __p1_377); \ + __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_377; \ }) -#else -#define vdupq_laneq_s32(__p0_378, __p1_378) __extension__ ({ \ - int32x4_t __ret_378; \ - int32x4_t __s0_378 = __p0_378; \ - int32x4_t __rev0_378; __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 3, 2, 1, 
0); \ - __ret_378 = __noswap_splatq_laneq_s32(__rev0_378, __p1_378); \ - __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p64(__p0_378, __p1_378) __extension__ ({ \ + poly64x2_t __ret_378; \ + poly64x2_t __s0_378 = __p0_378; \ + __ret_378 = splatq_laneq_p64(__s0_378, __p1_378); \ __ret_378; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s64(__p0_379, __p1_379) __extension__ ({ \ - int64x2_t __ret_379; \ - int64x2_t __s0_379 = __p0_379; \ - __ret_379 = splatq_laneq_s64(__s0_379, __p1_379); \ +#else +#define vdupq_laneq_p64(__p0_379, __p1_379) __extension__ ({ \ + poly64x2_t __ret_379; \ + poly64x2_t __s0_379 = __p0_379; \ + poly64x2_t __rev0_379; __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \ + __ret_379 = __noswap_splatq_laneq_p64(__rev0_379, __p1_379); \ + __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \ __ret_379; \ }) -#else -#define vdupq_laneq_s64(__p0_380, __p1_380) __extension__ ({ \ - int64x2_t __ret_380; \ - int64x2_t __s0_380 = __p0_380; \ - int64x2_t __rev0_380; __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 1, 0); \ - __ret_380 = __noswap_splatq_laneq_s64(__rev0_380, __p1_380); \ - __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p16(__p0_380, __p1_380) __extension__ ({ \ + poly16x8_t __ret_380; \ + poly16x8_t __s0_380 = __p0_380; \ + __ret_380 = splatq_laneq_p16(__s0_380, __p1_380); \ __ret_380; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s16(__p0_381, __p1_381) __extension__ ({ \ - int16x8_t __ret_381; \ - int16x8_t __s0_381 = __p0_381; \ - __ret_381 = splatq_laneq_s16(__s0_381, __p1_381); \ +#else +#define vdupq_laneq_p16(__p0_381, __p1_381) __extension__ ({ \ + poly16x8_t __ret_381; \ + poly16x8_t __s0_381 = __p0_381; \ + poly16x8_t __rev0_381; __rev0_381 = __builtin_shufflevector(__s0_381, __s0_381, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_381 = __noswap_splatq_laneq_p16(__rev0_381, __p1_381); \ + __ret_381 = __builtin_shufflevector(__ret_381, __ret_381, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_381; \ }) -#else -#define vdupq_laneq_s16(__p0_382, __p1_382) __extension__ ({ \ - int16x8_t __ret_382; \ - int16x8_t __s0_382 = __p0_382; \ - int16x8_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_382 = __noswap_splatq_laneq_s16(__rev0_382, __p1_382); \ - __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u8(__p0_382, __p1_382) __extension__ ({ \ + uint8x16_t __ret_382; \ + uint8x16_t __s0_382 = __p0_382; \ + __ret_382 = splatq_laneq_u8(__s0_382, __p1_382); \ __ret_382; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ - uint8x8_t __ret_383; \ +#else +#define vdupq_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ + uint8x16_t __ret_383; \ uint8x16_t __s0_383 = __p0_383; \ - __ret_383 = splat_laneq_u8(__s0_383, __p1_383); \ + uint8x16_t __rev0_383; __rev0_383 = __builtin_shufflevector(__s0_383, __s0_383, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_383 = __noswap_splatq_laneq_u8(__rev0_383, __p1_383); \ + __ret_383 = __builtin_shufflevector(__ret_383, __ret_383, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_383; \ }) -#else -#define vdup_laneq_u8(__p0_384, __p1_384) __extension__ ({ \ - uint8x8_t __ret_384; \ - uint8x16_t __s0_384 
= __p0_384; \ - uint8x16_t __rev0_384; __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_384 = __noswap_splat_laneq_u8(__rev0_384, __p1_384); \ - __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u32(__p0_384, __p1_384) __extension__ ({ \ + uint32x4_t __ret_384; \ + uint32x4_t __s0_384 = __p0_384; \ + __ret_384 = splatq_laneq_u32(__s0_384, __p1_384); \ __ret_384; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ - uint32x2_t __ret_385; \ +#else +#define vdupq_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ + uint32x4_t __ret_385; \ uint32x4_t __s0_385 = __p0_385; \ - __ret_385 = splat_laneq_u32(__s0_385, __p1_385); \ + uint32x4_t __rev0_385; __rev0_385 = __builtin_shufflevector(__s0_385, __s0_385, 3, 2, 1, 0); \ + __ret_385 = __noswap_splatq_laneq_u32(__rev0_385, __p1_385); \ + __ret_385 = __builtin_shufflevector(__ret_385, __ret_385, 3, 2, 1, 0); \ __ret_385; \ }) -#else -#define vdup_laneq_u32(__p0_386, __p1_386) __extension__ ({ \ - uint32x2_t __ret_386; \ - uint32x4_t __s0_386 = __p0_386; \ - uint32x4_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 3, 2, 1, 0); \ - __ret_386 = __noswap_splat_laneq_u32(__rev0_386, __p1_386); \ - __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u64(__p0_386, __p1_386) __extension__ ({ \ + uint64x2_t __ret_386; \ + uint64x2_t __s0_386 = __p0_386; \ + __ret_386 = splatq_laneq_u64(__s0_386, __p1_386); \ __ret_386; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ - uint64x1_t __ret_387; \ +#else +#define vdupq_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ + uint64x2_t __ret_387; \ uint64x2_t __s0_387 = __p0_387; \ - __ret_387 = splat_laneq_u64(__s0_387, __p1_387); \ + uint64x2_t __rev0_387; __rev0_387 = __builtin_shufflevector(__s0_387, __s0_387, 1, 0); \ + __ret_387 = __noswap_splatq_laneq_u64(__rev0_387, __p1_387); \ + __ret_387 = __builtin_shufflevector(__ret_387, __ret_387, 1, 0); \ __ret_387; \ }) -#else -#define vdup_laneq_u64(__p0_388, __p1_388) __extension__ ({ \ - uint64x1_t __ret_388; \ - uint64x2_t __s0_388 = __p0_388; \ - uint64x2_t __rev0_388; __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 1, 0); \ - __ret_388 = __noswap_splat_laneq_u64(__rev0_388, __p1_388); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u16(__p0_388, __p1_388) __extension__ ({ \ + uint16x8_t __ret_388; \ + uint16x8_t __s0_388 = __p0_388; \ + __ret_388 = splatq_laneq_u16(__s0_388, __p1_388); \ __ret_388; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ - uint16x4_t __ret_389; \ +#else +#define vdupq_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ + uint16x8_t __ret_389; \ uint16x8_t __s0_389 = __p0_389; \ - __ret_389 = splat_laneq_u16(__s0_389, __p1_389); \ + uint16x8_t __rev0_389; __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_389 = __noswap_splatq_laneq_u16(__rev0_389, __p1_389); \ + __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_389; \ }) -#else -#define vdup_laneq_u16(__p0_390, __p1_390) __extension__ ({ \ - uint16x4_t __ret_390; \ - uint16x8_t __s0_390 = __p0_390; \ - uint16x8_t __rev0_390; __rev0_390 = __builtin_shufflevector(__s0_390, 
__s0_390, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_390 = __noswap_splat_laneq_u16(__rev0_390, __p1_390); \ - __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s8(__p0_390, __p1_390) __extension__ ({ \ + int8x16_t __ret_390; \ + int8x16_t __s0_390 = __p0_390; \ + __ret_390 = splatq_laneq_s8(__s0_390, __p1_390); \ __ret_390; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ - int8x8_t __ret_391; \ +#else +#define vdupq_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ + int8x16_t __ret_391; \ int8x16_t __s0_391 = __p0_391; \ - __ret_391 = splat_laneq_s8(__s0_391, __p1_391); \ + int8x16_t __rev0_391; __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_391 = __noswap_splatq_laneq_s8(__rev0_391, __p1_391); \ + __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_391; \ }) -#else -#define vdup_laneq_s8(__p0_392, __p1_392) __extension__ ({ \ - int8x8_t __ret_392; \ - int8x16_t __s0_392 = __p0_392; \ - int8x16_t __rev0_392; __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_392 = __noswap_splat_laneq_s8(__rev0_392, __p1_392); \ - __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 7, 6, 5, 4, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f64(__p0_392, __p1_392) __extension__ ({ \ + float64x2_t __ret_392; \ + float64x2_t __s0_392 = __p0_392; \ + __ret_392 = splatq_laneq_f64(__s0_392, __p1_392); \ __ret_392; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ - float64x1_t __ret_393; \ +#else +#define vdupq_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ + float64x2_t __ret_393; \ float64x2_t __s0_393 = __p0_393; \ - __ret_393 = splat_laneq_f64(__s0_393, __p1_393); \ + float64x2_t __rev0_393; __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \ + __ret_393 = __noswap_splatq_laneq_f64(__rev0_393, __p1_393); \ + __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \ __ret_393; \ }) -#else -#define vdup_laneq_f64(__p0_394, __p1_394) __extension__ ({ \ - float64x1_t __ret_394; \ - float64x2_t __s0_394 = __p0_394; \ - float64x2_t __rev0_394; __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 1, 0); \ - __ret_394 = __noswap_splat_laneq_f64(__rev0_394, __p1_394); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f32(__p0_394, __p1_394) __extension__ ({ \ + float32x4_t __ret_394; \ + float32x4_t __s0_394 = __p0_394; \ + __ret_394 = splatq_laneq_f32(__s0_394, __p1_394); \ __ret_394; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ - float32x2_t __ret_395; \ +#else +#define vdupq_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ + float32x4_t __ret_395; \ float32x4_t __s0_395 = __p0_395; \ - __ret_395 = splat_laneq_f32(__s0_395, __p1_395); \ + float32x4_t __rev0_395; __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 3, 2, 1, 0); \ + __ret_395 = __noswap_splatq_laneq_f32(__rev0_395, __p1_395); \ + __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 3, 2, 1, 0); \ __ret_395; \ }) -#else -#define vdup_laneq_f32(__p0_396, __p1_396) __extension__ ({ \ - float32x2_t __ret_396; \ - float32x4_t __s0_396 = __p0_396; \ - float32x4_t __rev0_396; __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 
2, 1, 0); \ - __ret_396 = __noswap_splat_laneq_f32(__rev0_396, __p1_396); \ - __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f16(__p0_396, __p1_396) __extension__ ({ \ + float16x8_t __ret_396; \ + float16x8_t __s0_396 = __p0_396; \ + __ret_396 = splatq_laneq_f16(__s0_396, __p1_396); \ __ret_396; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ - float16x4_t __ret_397; \ +#else +#define vdupq_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ + float16x8_t __ret_397; \ float16x8_t __s0_397 = __p0_397; \ - __ret_397 = splat_laneq_f16(__s0_397, __p1_397); \ + float16x8_t __rev0_397; __rev0_397 = __builtin_shufflevector(__s0_397, __s0_397, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_397 = __noswap_splatq_laneq_f16(__rev0_397, __p1_397); \ + __ret_397 = __builtin_shufflevector(__ret_397, __ret_397, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_397; \ }) -#else -#define vdup_laneq_f16(__p0_398, __p1_398) __extension__ ({ \ - float16x4_t __ret_398; \ - float16x8_t __s0_398 = __p0_398; \ - float16x8_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_398 = __noswap_splat_laneq_f16(__rev0_398, __p1_398); \ - __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s32(__p0_398, __p1_398) __extension__ ({ \ + int32x4_t __ret_398; \ + int32x4_t __s0_398 = __p0_398; \ + __ret_398 = splatq_laneq_s32(__s0_398, __p1_398); \ __ret_398; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ - int32x2_t __ret_399; \ +#else +#define vdupq_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ + int32x4_t __ret_399; \ int32x4_t __s0_399 = __p0_399; \ - __ret_399 = splat_laneq_s32(__s0_399, __p1_399); \ + int32x4_t __rev0_399; __rev0_399 = __builtin_shufflevector(__s0_399, __s0_399, 3, 2, 1, 0); \ + __ret_399 = __noswap_splatq_laneq_s32(__rev0_399, __p1_399); \ + __ret_399 = __builtin_shufflevector(__ret_399, __ret_399, 3, 2, 1, 0); \ __ret_399; \ }) -#else -#define vdup_laneq_s32(__p0_400, __p1_400) __extension__ ({ \ - int32x2_t __ret_400; \ - int32x4_t __s0_400 = __p0_400; \ - int32x4_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \ - __ret_400 = __noswap_splat_laneq_s32(__rev0_400, __p1_400); \ - __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s64(__p0_400, __p1_400) __extension__ ({ \ + int64x2_t __ret_400; \ + int64x2_t __s0_400 = __p0_400; \ + __ret_400 = splatq_laneq_s64(__s0_400, __p1_400); \ __ret_400; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ - int64x1_t __ret_401; \ +#else +#define vdupq_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ + int64x2_t __ret_401; \ int64x2_t __s0_401 = __p0_401; \ - __ret_401 = splat_laneq_s64(__s0_401, __p1_401); \ + int64x2_t __rev0_401; __rev0_401 = __builtin_shufflevector(__s0_401, __s0_401, 1, 0); \ + __ret_401 = __noswap_splatq_laneq_s64(__rev0_401, __p1_401); \ + __ret_401 = __builtin_shufflevector(__ret_401, __ret_401, 1, 0); \ __ret_401; \ }) -#else -#define vdup_laneq_s64(__p0_402, __p1_402) __extension__ ({ \ - int64x1_t __ret_402; \ - int64x2_t __s0_402 = __p0_402; \ - int64x2_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \ - __ret_402 = __noswap_splat_laneq_s64(__rev0_402, 
__p1_402); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s16(__p0_402, __p1_402) __extension__ ({ \ + int16x8_t __ret_402; \ + int16x8_t __s0_402 = __p0_402; \ + __ret_402 = splatq_laneq_s16(__s0_402, __p1_402); \ __ret_402; \ }) +#else +#define vdupq_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ + int16x8_t __ret_403; \ + int16x8_t __s0_403 = __p0_403; \ + int16x8_t __rev0_403; __rev0_403 = __builtin_shufflevector(__s0_403, __s0_403, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_403 = __noswap_splatq_laneq_s16(__rev0_403, __p1_403); \ + __ret_403 = __builtin_shufflevector(__ret_403, __ret_403, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_403; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ - int16x4_t __ret_403; \ - int16x8_t __s0_403 = __p0_403; \ - __ret_403 = splat_laneq_s16(__s0_403, __p1_403); \ - __ret_403; \ +#define vdup_laneq_u8(__p0_404, __p1_404) __extension__ ({ \ + uint8x8_t __ret_404; \ + uint8x16_t __s0_404 = __p0_404; \ + __ret_404 = splat_laneq_u8(__s0_404, __p1_404); \ + __ret_404; \ }) #else -#define vdup_laneq_s16(__p0_404, __p1_404) __extension__ ({ \ - int16x4_t __ret_404; \ - int16x8_t __s0_404 = __p0_404; \ - int16x8_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_404 = __noswap_splat_laneq_s16(__rev0_404, __p1_404); \ - __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 3, 2, 1, 0); \ - __ret_404; \ +#define vdup_laneq_u8(__p0_405, __p1_405) __extension__ ({ \ + uint8x8_t __ret_405; \ + uint8x16_t __s0_405 = __p0_405; \ + uint8x16_t __rev0_405; __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_405 = __noswap_splat_laneq_u8(__rev0_405, __p1_405); \ + __ret_405 = __builtin_shufflevector(__ret_405, __ret_405, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_405; \ }) #endif -__ai poly64x1_t vdup_n_p64(poly64_t __p0) { +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u32(__p0_406, __p1_406) __extension__ ({ \ + uint32x2_t __ret_406; \ + uint32x4_t __s0_406 = __p0_406; \ + __ret_406 = splat_laneq_u32(__s0_406, __p1_406); \ + __ret_406; \ +}) +#else +#define vdup_laneq_u32(__p0_407, __p1_407) __extension__ ({ \ + uint32x2_t __ret_407; \ + uint32x4_t __s0_407 = __p0_407; \ + uint32x4_t __rev0_407; __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, 3, 2, 1, 0); \ + __ret_407 = __noswap_splat_laneq_u32(__rev0_407, __p1_407); \ + __ret_407 = __builtin_shufflevector(__ret_407, __ret_407, 1, 0); \ + __ret_407; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u64(__p0_408, __p1_408) __extension__ ({ \ + uint64x1_t __ret_408; \ + uint64x2_t __s0_408 = __p0_408; \ + __ret_408 = splat_laneq_u64(__s0_408, __p1_408); \ + __ret_408; \ +}) +#else +#define vdup_laneq_u64(__p0_409, __p1_409) __extension__ ({ \ + uint64x1_t __ret_409; \ + uint64x2_t __s0_409 = __p0_409; \ + uint64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ + __ret_409 = __noswap_splat_laneq_u64(__rev0_409, __p1_409); \ + __ret_409; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u16(__p0_410, __p1_410) __extension__ ({ \ + uint16x4_t __ret_410; \ + uint16x8_t __s0_410 = __p0_410; \ + __ret_410 = splat_laneq_u16(__s0_410, __p1_410); \ + __ret_410; \ +}) +#else +#define vdup_laneq_u16(__p0_411, __p1_411) __extension__ ({ \ + uint16x4_t __ret_411; \ + uint16x8_t __s0_411 = __p0_411; \ + uint16x8_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 7, 6, 5, 4, 3, 2, 1, 
0); \ + __ret_411 = __noswap_splat_laneq_u16(__rev0_411, __p1_411); \ + __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ + __ret_411; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s8(__p0_412, __p1_412) __extension__ ({ \ + int8x8_t __ret_412; \ + int8x16_t __s0_412 = __p0_412; \ + __ret_412 = splat_laneq_s8(__s0_412, __p1_412); \ + __ret_412; \ +}) +#else +#define vdup_laneq_s8(__p0_413, __p1_413) __extension__ ({ \ + int8x8_t __ret_413; \ + int8x16_t __s0_413 = __p0_413; \ + int8x16_t __rev0_413; __rev0_413 = __builtin_shufflevector(__s0_413, __s0_413, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_413 = __noswap_splat_laneq_s8(__rev0_413, __p1_413); \ + __ret_413 = __builtin_shufflevector(__ret_413, __ret_413, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_413; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f64(__p0_414, __p1_414) __extension__ ({ \ + float64x1_t __ret_414; \ + float64x2_t __s0_414 = __p0_414; \ + __ret_414 = splat_laneq_f64(__s0_414, __p1_414); \ + __ret_414; \ +}) +#else +#define vdup_laneq_f64(__p0_415, __p1_415) __extension__ ({ \ + float64x1_t __ret_415; \ + float64x2_t __s0_415 = __p0_415; \ + float64x2_t __rev0_415; __rev0_415 = __builtin_shufflevector(__s0_415, __s0_415, 1, 0); \ + __ret_415 = __noswap_splat_laneq_f64(__rev0_415, __p1_415); \ + __ret_415; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f32(__p0_416, __p1_416) __extension__ ({ \ + float32x2_t __ret_416; \ + float32x4_t __s0_416 = __p0_416; \ + __ret_416 = splat_laneq_f32(__s0_416, __p1_416); \ + __ret_416; \ +}) +#else +#define vdup_laneq_f32(__p0_417, __p1_417) __extension__ ({ \ + float32x2_t __ret_417; \ + float32x4_t __s0_417 = __p0_417; \ + float32x4_t __rev0_417; __rev0_417 = __builtin_shufflevector(__s0_417, __s0_417, 3, 2, 1, 0); \ + __ret_417 = __noswap_splat_laneq_f32(__rev0_417, __p1_417); \ + __ret_417 = __builtin_shufflevector(__ret_417, __ret_417, 1, 0); \ + __ret_417; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f16(__p0_418, __p1_418) __extension__ ({ \ + float16x4_t __ret_418; \ + float16x8_t __s0_418 = __p0_418; \ + __ret_418 = splat_laneq_f16(__s0_418, __p1_418); \ + __ret_418; \ +}) +#else +#define vdup_laneq_f16(__p0_419, __p1_419) __extension__ ({ \ + float16x4_t __ret_419; \ + float16x8_t __s0_419 = __p0_419; \ + float16x8_t __rev0_419; __rev0_419 = __builtin_shufflevector(__s0_419, __s0_419, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_419 = __noswap_splat_laneq_f16(__rev0_419, __p1_419); \ + __ret_419 = __builtin_shufflevector(__ret_419, __ret_419, 3, 2, 1, 0); \ + __ret_419; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s32(__p0_420, __p1_420) __extension__ ({ \ + int32x2_t __ret_420; \ + int32x4_t __s0_420 = __p0_420; \ + __ret_420 = splat_laneq_s32(__s0_420, __p1_420); \ + __ret_420; \ +}) +#else +#define vdup_laneq_s32(__p0_421, __p1_421) __extension__ ({ \ + int32x2_t __ret_421; \ + int32x4_t __s0_421 = __p0_421; \ + int32x4_t __rev0_421; __rev0_421 = __builtin_shufflevector(__s0_421, __s0_421, 3, 2, 1, 0); \ + __ret_421 = __noswap_splat_laneq_s32(__rev0_421, __p1_421); \ + __ret_421 = __builtin_shufflevector(__ret_421, __ret_421, 1, 0); \ + __ret_421; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s64(__p0_422, __p1_422) __extension__ ({ \ + int64x1_t __ret_422; \ + int64x2_t __s0_422 = __p0_422; \ + __ret_422 = splat_laneq_s64(__s0_422, __p1_422); \ + __ret_422; \ +}) +#else +#define vdup_laneq_s64(__p0_423, __p1_423) __extension__ ({ \ + int64x1_t 
__ret_423; \ + int64x2_t __s0_423 = __p0_423; \ + int64x2_t __rev0_423; __rev0_423 = __builtin_shufflevector(__s0_423, __s0_423, 1, 0); \ + __ret_423 = __noswap_splat_laneq_s64(__rev0_423, __p1_423); \ + __ret_423; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s16(__p0_424, __p1_424) __extension__ ({ \ + int16x4_t __ret_424; \ + int16x8_t __s0_424 = __p0_424; \ + __ret_424 = splat_laneq_s16(__s0_424, __p1_424); \ + __ret_424; \ +}) +#else +#define vdup_laneq_s16(__p0_425, __p1_425) __extension__ ({ \ + int16x4_t __ret_425; \ + int16x8_t __s0_425 = __p0_425; \ + int16x8_t __rev0_425; __rev0_425 = __builtin_shufflevector(__s0_425, __s0_425, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_425 = __noswap_splat_laneq_s16(__rev0_425, __p1_425); \ + __ret_425 = __builtin_shufflevector(__ret_425, __ret_425, 3, 2, 1, 0); \ + __ret_425; \ +}) +#endif + +__ai __attribute__((target("neon"))) poly64x1_t vdup_n_p64(poly64_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; return __ret; } #else -__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -46386,13 +48132,13 @@ __ai poly64x2_t vdupq_n_p64(poly64_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vdupq_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; return __ret; } #else -__ai float64x2_t vdupq_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -46400,7 +48146,7 @@ __ai float64x2_t vdupq_n_f64(float64_t __p0) { } #endif -__ai float64x1_t vdup_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vdup_n_f64(float64_t __p0) { float64x1_t __ret; __ret = (float64x1_t) {__p0}; return __ret; @@ -46462,13 +48208,13 @@ __ai float64x1_t vdup_n_f64(float64_t __p0) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -46477,14 +48223,14 @@ __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, 
(int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; @@ -46813,13 +48559,13 @@ __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); return __ret; } #else -__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -46829,19 +48575,19 @@ __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) } #endif -__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { float64x1_t __ret; __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = vfmaq_f64(__p0, -__p1, __p2); return __ret; } #else -__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -46852,262 +48598,262 @@ __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) } #endif -__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = vfma_f64(__p0, -__p1, __p2); return __ret; } -#define vfmsd_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \ - float64_t __ret_405; \ - float64_t __s0_405 = __p0_405; \ - float64_t __s1_405 = __p1_405; \ - float64x1_t __s2_405 = __p2_405; \ - __ret_405 = vfmad_lane_f64(__s0_405, -__s1_405, __s2_405, __p3_405); \ - __ret_405; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vfmss_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \ - float32_t __ret_406; \ - float32_t __s0_406 = __p0_406; \ - float32_t __s1_406 = __p1_406; \ - float32x2_t __s2_406 = __p2_406; \ - __ret_406 = vfmas_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \ - __ret_406; \ -}) -#else -#define vfmss_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \ - float32_t __ret_407; \ - float32_t __s0_407 = __p0_407; \ - float32_t __s1_407 = __p1_407; \ - float32x2_t __s2_407 = __p2_407; \ - float32x2_t __rev2_407; __rev2_407 = 
__builtin_shufflevector(__s2_407, __s2_407, 1, 0); \ - __ret_407 = __noswap_vfmas_lane_f32(__s0_407, -__s1_407, __rev2_407, __p3_407); \ - __ret_407; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \ - float64x2_t __ret_408; \ - float64x2_t __s0_408 = __p0_408; \ - float64x2_t __s1_408 = __p1_408; \ - float64x1_t __s2_408 = __p2_408; \ - __ret_408 = vfmaq_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \ - __ret_408; \ -}) -#else -#define vfmsq_lane_f64(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \ - float64x2_t __ret_409; \ - float64x2_t __s0_409 = __p0_409; \ - float64x2_t __s1_409 = __p1_409; \ - float64x1_t __s2_409 = __p2_409; \ - float64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ - float64x2_t __rev1_409; __rev1_409 = __builtin_shufflevector(__s1_409, __s1_409, 1, 0); \ - __ret_409 = __noswap_vfmaq_lane_f64(__rev0_409, -__rev1_409, __s2_409, __p3_409); \ - __ret_409 = __builtin_shufflevector(__ret_409, __ret_409, 1, 0); \ - __ret_409; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \ - float32x4_t __ret_410; \ - float32x4_t __s0_410 = __p0_410; \ - float32x4_t __s1_410 = __p1_410; \ - float32x2_t __s2_410 = __p2_410; \ - __ret_410 = vfmaq_lane_f32(__s0_410, -__s1_410, __s2_410, __p3_410); \ - __ret_410; \ -}) -#else -#define vfmsq_lane_f32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \ - float32x4_t __ret_411; \ - float32x4_t __s0_411 = __p0_411; \ - float32x4_t __s1_411 = __p1_411; \ - float32x2_t __s2_411 = __p2_411; \ - float32x4_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 3, 2, 1, 0); \ - float32x4_t __rev1_411; __rev1_411 = __builtin_shufflevector(__s1_411, __s1_411, 3, 2, 1, 0); \ - float32x2_t __rev2_411; __rev2_411 = __builtin_shufflevector(__s2_411, __s2_411, 1, 0); \ - __ret_411 = __noswap_vfmaq_lane_f32(__rev0_411, -__rev1_411, __rev2_411, __p3_411); \ - __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ - __ret_411; \ -}) -#endif - -#define vfms_lane_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \ - float64x1_t __ret_412; \ - float64x1_t __s0_412 = __p0_412; \ - float64x1_t __s1_412 = __p1_412; \ - float64x1_t __s2_412 = __p2_412; \ - __ret_412 = vfma_lane_f64(__s0_412, -__s1_412, __s2_412, __p3_412); \ - __ret_412; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \ - float32x2_t __ret_413; \ - float32x2_t __s0_413 = __p0_413; \ - float32x2_t __s1_413 = __p1_413; \ - float32x2_t __s2_413 = __p2_413; \ - __ret_413 = vfma_lane_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \ - __ret_413; \ -}) -#else -#define vfms_lane_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \ - float32x2_t __ret_414; \ - float32x2_t __s0_414 = __p0_414; \ - float32x2_t __s1_414 = __p1_414; \ - float32x2_t __s2_414 = __p2_414; \ - float32x2_t __rev0_414; __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 1, 0); \ - float32x2_t __rev1_414; __rev1_414 = __builtin_shufflevector(__s1_414, __s1_414, 1, 0); \ - float32x2_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \ - __ret_414 = __noswap_vfma_lane_f32(__rev0_414, -__rev1_414, __rev2_414, __p3_414); \ - __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 1, 0); \ - __ret_414; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsd_laneq_f64(__p0_415, __p1_415, 
__p2_415, __p3_415) __extension__ ({ \ - float64_t __ret_415; \ - float64_t __s0_415 = __p0_415; \ - float64_t __s1_415 = __p1_415; \ - float64x2_t __s2_415 = __p2_415; \ - __ret_415 = vfmad_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \ - __ret_415; \ -}) -#else -#define vfmsd_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \ - float64_t __ret_416; \ - float64_t __s0_416 = __p0_416; \ - float64_t __s1_416 = __p1_416; \ - float64x2_t __s2_416 = __p2_416; \ - float64x2_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \ - __ret_416 = __noswap_vfmad_laneq_f64(__s0_416, -__s1_416, __rev2_416, __p3_416); \ - __ret_416; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmss_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \ - float32_t __ret_417; \ - float32_t __s0_417 = __p0_417; \ - float32_t __s1_417 = __p1_417; \ - float32x4_t __s2_417 = __p2_417; \ - __ret_417 = vfmas_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \ - __ret_417; \ -}) -#else -#define vfmss_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \ - float32_t __ret_418; \ - float32_t __s0_418 = __p0_418; \ - float32_t __s1_418 = __p1_418; \ - float32x4_t __s2_418 = __p2_418; \ - float32x4_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \ - __ret_418 = __noswap_vfmas_laneq_f32(__s0_418, -__s1_418, __rev2_418, __p3_418); \ - __ret_418; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \ - float64x2_t __ret_419; \ - float64x2_t __s0_419 = __p0_419; \ - float64x2_t __s1_419 = __p1_419; \ - float64x2_t __s2_419 = __p2_419; \ - __ret_419 = vfmaq_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \ - __ret_419; \ -}) -#else -#define vfmsq_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \ - float64x2_t __ret_420; \ - float64x2_t __s0_420 = __p0_420; \ - float64x2_t __s1_420 = __p1_420; \ - float64x2_t __s2_420 = __p2_420; \ - float64x2_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \ - float64x2_t __rev1_420; __rev1_420 = __builtin_shufflevector(__s1_420, __s1_420, 1, 0); \ - float64x2_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \ - __ret_420 = __noswap_vfmaq_laneq_f64(__rev0_420, -__rev1_420, __rev2_420, __p3_420); \ - __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \ - __ret_420; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \ - float32x4_t __ret_421; \ - float32x4_t __s0_421 = __p0_421; \ - float32x4_t __s1_421 = __p1_421; \ - float32x4_t __s2_421 = __p2_421; \ - __ret_421 = vfmaq_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \ - __ret_421; \ -}) -#else -#define vfmsq_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \ - float32x4_t __ret_422; \ - float32x4_t __s0_422 = __p0_422; \ - float32x4_t __s1_422 = __p1_422; \ - float32x4_t __s2_422 = __p2_422; \ - float32x4_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 3, 2, 1, 0); \ - float32x4_t __rev1_422; __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 3, 2, 1, 0); \ - float32x4_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \ - __ret_422 = __noswap_vfmaq_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \ - __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 3, 2, 1, 0); \ - __ret_422; \ -}) -#endif - 
-#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \ - float64x1_t __ret_423; \ - float64x1_t __s0_423 = __p0_423; \ - float64x1_t __s1_423 = __p1_423; \ - float64x2_t __s2_423 = __p2_423; \ - __ret_423 = vfma_laneq_f64(__s0_423, -__s1_423, __s2_423, __p3_423); \ - __ret_423; \ -}) -#else -#define vfms_laneq_f64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \ - float64x1_t __ret_424; \ - float64x1_t __s0_424 = __p0_424; \ - float64x1_t __s1_424 = __p1_424; \ - float64x2_t __s2_424 = __p2_424; \ - float64x2_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \ - __ret_424 = __noswap_vfma_laneq_f64(__s0_424, -__s1_424, __rev2_424, __p3_424); \ - __ret_424; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f32(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \ - float32x2_t __ret_425; \ - float32x2_t __s0_425 = __p0_425; \ - float32x2_t __s1_425 = __p1_425; \ - float32x4_t __s2_425 = __p2_425; \ - __ret_425 = vfma_laneq_f32(__s0_425, -__s1_425, __s2_425, __p3_425); \ - __ret_425; \ -}) -#else -#define vfms_laneq_f32(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ - float32x2_t __ret_426; \ - float32x2_t __s0_426 = __p0_426; \ - float32x2_t __s1_426 = __p1_426; \ - float32x4_t __s2_426 = __p2_426; \ - float32x2_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 1, 0); \ - float32x2_t __rev1_426; __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 1, 0); \ - float32x4_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 3, 2, 1, 0); \ - __ret_426 = __noswap_vfma_laneq_f32(__rev0_426, -__rev1_426, __rev2_426, __p3_426); \ - __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 1, 0); \ +#define vfmsd_lane_f64(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ + float64_t __ret_426; \ + float64_t __s0_426 = __p0_426; \ + float64_t __s1_426 = __p1_426; \ + float64x1_t __s2_426 = __p2_426; \ + __ret_426 = vfmad_lane_f64(__s0_426, -__s1_426, __s2_426, __p3_426); \ __ret_426; \ }) +#ifdef __LITTLE_ENDIAN__ +#define vfmss_lane_f32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ + float32_t __ret_427; \ + float32_t __s0_427 = __p0_427; \ + float32_t __s1_427 = __p1_427; \ + float32x2_t __s2_427 = __p2_427; \ + __ret_427 = vfmas_lane_f32(__s0_427, -__s1_427, __s2_427, __p3_427); \ + __ret_427; \ +}) +#else +#define vfmss_lane_f32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ + float32_t __ret_428; \ + float32_t __s0_428 = __p0_428; \ + float32_t __s1_428 = __p1_428; \ + float32x2_t __s2_428 = __p2_428; \ + float32x2_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 1, 0); \ + __ret_428 = __noswap_vfmas_lane_f32(__s0_428, -__s1_428, __rev2_428, __p3_428); \ + __ret_428; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { +#define vfmsq_lane_f64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ + float64x2_t __ret_429; \ + float64x2_t __s0_429 = __p0_429; \ + float64x2_t __s1_429 = __p1_429; \ + float64x1_t __s2_429 = __p2_429; \ + __ret_429 = vfmaq_lane_f64(__s0_429, -__s1_429, __s2_429, __p3_429); \ + __ret_429; \ +}) +#else +#define vfmsq_lane_f64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ + float64x2_t __ret_430; \ + float64x2_t __s0_430 = __p0_430; \ + float64x2_t __s1_430 = __p1_430; \ + float64x1_t __s2_430 = __p2_430; \ + float64x2_t __rev0_430; __rev0_430 = 
__builtin_shufflevector(__s0_430, __s0_430, 1, 0); \ + float64x2_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 1, 0); \ + __ret_430 = __noswap_vfmaq_lane_f64(__rev0_430, -__rev1_430, __s2_430, __p3_430); \ + __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 1, 0); \ + __ret_430; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ + float32x4_t __ret_431; \ + float32x4_t __s0_431 = __p0_431; \ + float32x4_t __s1_431 = __p1_431; \ + float32x2_t __s2_431 = __p2_431; \ + __ret_431 = vfmaq_lane_f32(__s0_431, -__s1_431, __s2_431, __p3_431); \ + __ret_431; \ +}) +#else +#define vfmsq_lane_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ + float32x4_t __ret_432; \ + float32x4_t __s0_432 = __p0_432; \ + float32x4_t __s1_432 = __p1_432; \ + float32x2_t __s2_432 = __p2_432; \ + float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ + float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ + float32x2_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 1, 0); \ + __ret_432 = __noswap_vfmaq_lane_f32(__rev0_432, -__rev1_432, __rev2_432, __p3_432); \ + __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ + __ret_432; \ +}) +#endif + +#define vfms_lane_f64(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ + float64x1_t __ret_433; \ + float64x1_t __s0_433 = __p0_433; \ + float64x1_t __s1_433 = __p1_433; \ + float64x1_t __s2_433 = __p2_433; \ + __ret_433 = vfma_lane_f64(__s0_433, -__s1_433, __s2_433, __p3_433); \ + __ret_433; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ + float32x2_t __ret_434; \ + float32x2_t __s0_434 = __p0_434; \ + float32x2_t __s1_434 = __p1_434; \ + float32x2_t __s2_434 = __p2_434; \ + __ret_434 = vfma_lane_f32(__s0_434, -__s1_434, __s2_434, __p3_434); \ + __ret_434; \ +}) +#else +#define vfms_lane_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ + float32x2_t __ret_435; \ + float32x2_t __s0_435 = __p0_435; \ + float32x2_t __s1_435 = __p1_435; \ + float32x2_t __s2_435 = __p2_435; \ + float32x2_t __rev0_435; __rev0_435 = __builtin_shufflevector(__s0_435, __s0_435, 1, 0); \ + float32x2_t __rev1_435; __rev1_435 = __builtin_shufflevector(__s1_435, __s1_435, 1, 0); \ + float32x2_t __rev2_435; __rev2_435 = __builtin_shufflevector(__s2_435, __s2_435, 1, 0); \ + __ret_435 = __noswap_vfma_lane_f32(__rev0_435, -__rev1_435, __rev2_435, __p3_435); \ + __ret_435 = __builtin_shufflevector(__ret_435, __ret_435, 1, 0); \ + __ret_435; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsd_laneq_f64(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ + float64_t __ret_436; \ + float64_t __s0_436 = __p0_436; \ + float64_t __s1_436 = __p1_436; \ + float64x2_t __s2_436 = __p2_436; \ + __ret_436 = vfmad_laneq_f64(__s0_436, -__s1_436, __s2_436, __p3_436); \ + __ret_436; \ +}) +#else +#define vfmsd_laneq_f64(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ + float64_t __ret_437; \ + float64_t __s0_437 = __p0_437; \ + float64_t __s1_437 = __p1_437; \ + float64x2_t __s2_437 = __p2_437; \ + float64x2_t __rev2_437; __rev2_437 = __builtin_shufflevector(__s2_437, __s2_437, 1, 0); \ + __ret_437 = __noswap_vfmad_laneq_f64(__s0_437, -__s1_437, __rev2_437, __p3_437); \ + __ret_437; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmss_laneq_f32(__p0_438, __p1_438, 
__p2_438, __p3_438) __extension__ ({ \ + float32_t __ret_438; \ + float32_t __s0_438 = __p0_438; \ + float32_t __s1_438 = __p1_438; \ + float32x4_t __s2_438 = __p2_438; \ + __ret_438 = vfmas_laneq_f32(__s0_438, -__s1_438, __s2_438, __p3_438); \ + __ret_438; \ +}) +#else +#define vfmss_laneq_f32(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ + float32_t __ret_439; \ + float32_t __s0_439 = __p0_439; \ + float32_t __s1_439 = __p1_439; \ + float32x4_t __s2_439 = __p2_439; \ + float32x4_t __rev2_439; __rev2_439 = __builtin_shufflevector(__s2_439, __s2_439, 3, 2, 1, 0); \ + __ret_439 = __noswap_vfmas_laneq_f32(__s0_439, -__s1_439, __rev2_439, __p3_439); \ + __ret_439; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ + float64x2_t __ret_440; \ + float64x2_t __s0_440 = __p0_440; \ + float64x2_t __s1_440 = __p1_440; \ + float64x2_t __s2_440 = __p2_440; \ + __ret_440 = vfmaq_laneq_f64(__s0_440, -__s1_440, __s2_440, __p3_440); \ + __ret_440; \ +}) +#else +#define vfmsq_laneq_f64(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ + float64x2_t __ret_441; \ + float64x2_t __s0_441 = __p0_441; \ + float64x2_t __s1_441 = __p1_441; \ + float64x2_t __s2_441 = __p2_441; \ + float64x2_t __rev0_441; __rev0_441 = __builtin_shufflevector(__s0_441, __s0_441, 1, 0); \ + float64x2_t __rev1_441; __rev1_441 = __builtin_shufflevector(__s1_441, __s1_441, 1, 0); \ + float64x2_t __rev2_441; __rev2_441 = __builtin_shufflevector(__s2_441, __s2_441, 1, 0); \ + __ret_441 = __noswap_vfmaq_laneq_f64(__rev0_441, -__rev1_441, __rev2_441, __p3_441); \ + __ret_441 = __builtin_shufflevector(__ret_441, __ret_441, 1, 0); \ + __ret_441; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ + float32x4_t __ret_442; \ + float32x4_t __s0_442 = __p0_442; \ + float32x4_t __s1_442 = __p1_442; \ + float32x4_t __s2_442 = __p2_442; \ + __ret_442 = vfmaq_laneq_f32(__s0_442, -__s1_442, __s2_442, __p3_442); \ + __ret_442; \ +}) +#else +#define vfmsq_laneq_f32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ + float32x4_t __ret_443; \ + float32x4_t __s0_443 = __p0_443; \ + float32x4_t __s1_443 = __p1_443; \ + float32x4_t __s2_443 = __p2_443; \ + float32x4_t __rev0_443; __rev0_443 = __builtin_shufflevector(__s0_443, __s0_443, 3, 2, 1, 0); \ + float32x4_t __rev1_443; __rev1_443 = __builtin_shufflevector(__s1_443, __s1_443, 3, 2, 1, 0); \ + float32x4_t __rev2_443; __rev2_443 = __builtin_shufflevector(__s2_443, __s2_443, 3, 2, 1, 0); \ + __ret_443 = __noswap_vfmaq_laneq_f32(__rev0_443, -__rev1_443, __rev2_443, __p3_443); \ + __ret_443 = __builtin_shufflevector(__ret_443, __ret_443, 3, 2, 1, 0); \ + __ret_443; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f64(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ + float64x1_t __ret_444; \ + float64x1_t __s0_444 = __p0_444; \ + float64x1_t __s1_444 = __p1_444; \ + float64x2_t __s2_444 = __p2_444; \ + __ret_444 = vfma_laneq_f64(__s0_444, -__s1_444, __s2_444, __p3_444); \ + __ret_444; \ +}) +#else +#define vfms_laneq_f64(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ + float64x1_t __ret_445; \ + float64x1_t __s0_445 = __p0_445; \ + float64x1_t __s1_445 = __p1_445; \ + float64x2_t __s2_445 = __p2_445; \ + float64x2_t __rev2_445; __rev2_445 = __builtin_shufflevector(__s2_445, __s2_445, 1, 0); \ + __ret_445 = __noswap_vfma_laneq_f64(__s0_445, -__s1_445, __rev2_445, __p3_445); \ + __ret_445; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f32(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ + float32x2_t __ret_446; \ + float32x2_t __s0_446 = __p0_446; \ + float32x2_t __s1_446 = __p1_446; \ + float32x4_t __s2_446 = __p2_446; \ + __ret_446 = vfma_laneq_f32(__s0_446, -__s1_446, __s2_446, __p3_446); \ + __ret_446; \ +}) +#else +#define vfms_laneq_f32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ + float32x2_t __ret_447; \ + float32x2_t __s0_447 = __p0_447; \ + float32x2_t __s1_447 = __p1_447; \ + float32x4_t __s2_447 = __p2_447; \ + float32x2_t __rev0_447; __rev0_447 = __builtin_shufflevector(__s0_447, __s0_447, 1, 0); \ + float32x2_t __rev1_447; __rev1_447 = __builtin_shufflevector(__s1_447, __s1_447, 1, 0); \ + float32x4_t __rev2_447; __rev2_447 = __builtin_shufflevector(__s2_447, __s2_447, 3, 2, 1, 0); \ + __ret_447 = __noswap_vfma_laneq_f32(__rev0_447, -__rev1_447, __rev2_447, __p3_447); \ + __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \ + __ret_447; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); return __ret; } #else -__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -47118,13 +48864,13 @@ __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -47134,19 +48880,19 @@ __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) } #endif -__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { float64x1_t __ret; __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2}); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); return __ret; } #else -__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); @@ -47157,19 +48903,19 @@ __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) { poly64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else -__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) { poly64x1_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); return __ret; } -__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { poly64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; @@ -47177,13 +48923,13 @@ __ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vget_high_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) { float64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else -__ai float64x1_t vget_high_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) { float64x1_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 1); @@ -47250,13 +48996,13 @@ __ai float64x1_t vget_high_f64(float64x2_t __p0) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) { poly64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else -__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) { poly64x1_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); @@ -47265,13 +49011,13 @@ __ai poly64x1_t vget_low_p64(poly64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vget_low_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) { float64x1_t __ret; __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else -__ai float64x1_t vget_low_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) { float64x1_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev0, 0); @@ -48511,13 +50257,13 @@ __ai float64x1_t vget_low_f64(float64x2_t __p0) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -48527,19 +50273,19 @@ __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai 
float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0); return __ret; } #else -__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); @@ -48548,13 +50294,13 @@ __ai float64_t vmaxnmvq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); return __ret; } #else -__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); @@ -48563,13 +50309,13 @@ __ai float32_t vmaxnmvq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxnmv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); return __ret; } #else -__ai float32_t vmaxnmv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); @@ -48578,13 +50324,13 @@ __ai float32_t vmaxnmv_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); return __ret; } #else -__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) { uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); @@ -48593,13 +50339,13 @@ __ai uint8_t vmaxvq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); return __ret; } #else -__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) { uint32_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); @@ -48608,13 +50354,13 @@ __ai uint32_t vmaxvq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0); return __ret; } #else -__ai uint16_t vmaxvq_u16(uint16x8_t 
__p0) { +__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) { uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); @@ -48623,13 +50369,13 @@ __ai uint16_t vmaxvq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vmaxvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); return __ret; } #else -__ai int8_t vmaxvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) { int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); @@ -48638,13 +50384,13 @@ __ai int8_t vmaxvq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vmaxvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); return __ret; } #else -__ai float64_t vmaxvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); @@ -48653,13 +50399,13 @@ __ai float64_t vmaxvq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); return __ret; } #else -__ai float32_t vmaxvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); @@ -48668,13 +50414,13 @@ __ai float32_t vmaxvq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vmaxvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); return __ret; } #else -__ai int32_t vmaxvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) { int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); @@ -48683,13 +50429,13 @@ __ai int32_t vmaxvq_s32(int32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vmaxvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); return __ret; } #else -__ai int16_t vmaxvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) { int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0); @@ -48698,13 +50444,13 @@ __ai int16_t vmaxvq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vmaxv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); return __ret; } #else -__ai uint8_t vmaxv_u8(uint8x8_t __p0) { +__ai 
__attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) { uint8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); @@ -48713,13 +50459,13 @@ __ai uint8_t vmaxv_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vmaxv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0); return __ret; } #else -__ai uint32_t vmaxv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) { uint32_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); @@ -48728,13 +50474,13 @@ __ai uint32_t vmaxv_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vmaxv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); return __ret; } #else -__ai uint16_t vmaxv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) { uint16_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); @@ -48743,13 +50489,13 @@ __ai uint16_t vmaxv_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vmaxv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); return __ret; } #else -__ai int8_t vmaxv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) { int8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); @@ -48758,13 +50504,13 @@ __ai int8_t vmaxv_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); return __ret; } #else -__ai float32_t vmaxv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); @@ -48773,13 +50519,13 @@ __ai float32_t vmaxv_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vmaxv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); return __ret; } #else -__ai int32_t vmaxv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) { int32_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); @@ -48788,13 +50534,13 @@ __ai int32_t vmaxv_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vmaxv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); return __ret; } #else -__ai int16_t vmaxv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) { int16_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); @@ -48803,13 +50549,13 @@ __ai int16_t vmaxv_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -48819,19 +50565,19 @@ __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64_t vminnmvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); return __ret; } #else -__ai float64_t vminnmvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); @@ -48840,13 +50586,13 @@ __ai float64_t vminnmvq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminnmvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0); return __ret; } #else -__ai float32_t vminnmvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); @@ -48855,13 +50601,13 @@ __ai float32_t vminnmvq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminnmv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); return __ret; } #else -__ai float32_t vminnmv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); @@ -48870,13 +50616,13 @@ __ai float32_t vminnmv_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vminvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0); return __ret; } #else -__ai uint8_t vminvq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) { uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); @@ -48885,13 +50631,13 @@ __ai uint8_t 
vminvq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vminvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); return __ret; } #else -__ai uint32_t vminvq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) { uint32_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); @@ -48900,13 +50646,13 @@ __ai uint32_t vminvq_u32(uint32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vminvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); return __ret; } #else -__ai uint16_t vminvq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) { uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); @@ -48915,13 +50661,13 @@ __ai uint16_t vminvq_u16(uint16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vminvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); return __ret; } #else -__ai int8_t vminvq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) { int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); @@ -48930,13 +50676,13 @@ __ai int8_t vminvq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vminvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); return __ret; } #else -__ai float64_t vminvq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); @@ -48945,13 +50691,13 @@ __ai float64_t vminvq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); return __ret; } #else -__ai float32_t vminvq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) { float32_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); @@ -48960,13 +50706,13 @@ __ai float32_t vminvq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vminvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); return __ret; } #else -__ai int32_t vminvq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) { int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); @@ -48975,13 +50721,13 @@ __ai int32_t vminvq_s32(int32x4_t 
__p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vminvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); return __ret; } #else -__ai int16_t vminvq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) { int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0); @@ -48990,13 +50736,13 @@ __ai int16_t vminvq_s16(int16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vminv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vminv_u8(__p0); return __ret; } #else -__ai uint8_t vminv_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) { uint8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); @@ -49005,13 +50751,13 @@ __ai uint8_t vminv_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vminv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); return __ret; } #else -__ai uint32_t vminv_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) { uint32_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); @@ -49020,13 +50766,13 @@ __ai uint32_t vminv_u32(uint32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vminv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vminv_u16(__p0); return __ret; } #else -__ai uint16_t vminv_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) { uint16_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); @@ -49035,13 +50781,13 @@ __ai uint16_t vminv_u16(uint16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vminv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vminv_s8(__p0); return __ret; } #else -__ai int8_t vminv_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) { int8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); @@ -49050,13 +50796,13 @@ __ai int8_t vminv_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vminv_f32(__p0); return __ret; } #else -__ai float32_t vminv_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); @@ -49065,13 +50811,13 @@ __ai float32_t vminv_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vminv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) 
int32_t vminv_s32(int32x2_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vminv_s32(__p0); return __ret; } #else -__ai int32_t vminv_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) { int32_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); @@ -49080,13 +50826,13 @@ __ai int32_t vminv_s32(int32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vminv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vminv_s16(__p0); return __ret; } #else -__ai int16_t vminv_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) { int16_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); @@ -49095,13 +50841,13 @@ __ai int16_t vminv_s16(int16x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -49112,547 +50858,547 @@ __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) } #endif -__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = __p0 + __p1 * __p2; return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ - uint32x4_t __ret_427; \ - uint32x4_t __s0_427 = __p0_427; \ - uint32x4_t __s1_427 = __p1_427; \ - uint32x4_t __s2_427 = __p2_427; \ - __ret_427 = __s0_427 + __s1_427 * splatq_laneq_u32(__s2_427, __p3_427); \ - __ret_427; \ -}) -#else -#define vmlaq_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ - uint32x4_t __ret_428; \ - uint32x4_t __s0_428 = __p0_428; \ - uint32x4_t __s1_428 = __p1_428; \ - uint32x4_t __s2_428 = __p2_428; \ - uint32x4_t __rev0_428; __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \ - uint32x4_t __rev1_428; __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \ - uint32x4_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \ - __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_u32(__rev2_428, __p3_428); \ - __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \ - __ret_428; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u16(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ - uint16x8_t __ret_429; \ - uint16x8_t __s0_429 = __p0_429; \ - uint16x8_t __s1_429 = __p1_429; \ - uint16x8_t __s2_429 = __p2_429; \ - __ret_429 = __s0_429 + __s1_429 * splatq_laneq_u16(__s2_429, __p3_429); \ - __ret_429; \ -}) -#else -#define vmlaq_laneq_u16(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ - uint16x8_t __ret_430; \ - 
uint16x8_t __s0_430 = __p0_430; \ - uint16x8_t __s1_430 = __p1_430; \ - uint16x8_t __s2_430 = __p2_430; \ - uint16x8_t __rev0_430; __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_430; __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_u16(__rev2_430, __p3_430); \ - __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_430; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ - float32x4_t __ret_431; \ - float32x4_t __s0_431 = __p0_431; \ - float32x4_t __s1_431 = __p1_431; \ - float32x4_t __s2_431 = __p2_431; \ - __ret_431 = __s0_431 + __s1_431 * splatq_laneq_f32(__s2_431, __p3_431); \ - __ret_431; \ -}) -#else -#define vmlaq_laneq_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ - float32x4_t __ret_432; \ - float32x4_t __s0_432 = __p0_432; \ - float32x4_t __s1_432 = __p1_432; \ - float32x4_t __s2_432 = __p2_432; \ - float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ - float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ - float32x4_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 3, 2, 1, 0); \ - __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_f32(__rev2_432, __p3_432); \ - __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ - __ret_432; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ - int32x4_t __ret_433; \ - int32x4_t __s0_433 = __p0_433; \ - int32x4_t __s1_433 = __p1_433; \ - int32x4_t __s2_433 = __p2_433; \ - __ret_433 = __s0_433 + __s1_433 * splatq_laneq_s32(__s2_433, __p3_433); \ - __ret_433; \ -}) -#else -#define vmlaq_laneq_s32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ - int32x4_t __ret_434; \ - int32x4_t __s0_434 = __p0_434; \ - int32x4_t __s1_434 = __p1_434; \ - int32x4_t __s2_434 = __p2_434; \ - int32x4_t __rev0_434; __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 3, 2, 1, 0); \ - int32x4_t __rev1_434; __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 3, 2, 1, 0); \ - int32x4_t __rev2_434; __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \ - __ret_434 = __rev0_434 + __rev1_434 * __noswap_splatq_laneq_s32(__rev2_434, __p3_434); \ - __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 3, 2, 1, 0); \ - __ret_434; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ - int16x8_t __ret_435; \ - int16x8_t __s0_435 = __p0_435; \ - int16x8_t __s1_435 = __p1_435; \ - int16x8_t __s2_435 = __p2_435; \ - __ret_435 = __s0_435 + __s1_435 * splatq_laneq_s16(__s2_435, __p3_435); \ - __ret_435; \ -}) -#else -#define vmlaq_laneq_s16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ - int16x8_t __ret_436; \ - int16x8_t __s0_436 = __p0_436; \ - int16x8_t __s1_436 = __p1_436; \ - int16x8_t __s2_436 = __p2_436; \ - int16x8_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_436; __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_436; __rev2_436 = 
__builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_436 = __rev0_436 + __rev1_436 * __noswap_splatq_laneq_s16(__rev2_436, __p3_436); \ - __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_436; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ - uint32x2_t __ret_437; \ - uint32x2_t __s0_437 = __p0_437; \ - uint32x2_t __s1_437 = __p1_437; \ - uint32x4_t __s2_437 = __p2_437; \ - __ret_437 = __s0_437 + __s1_437 * splat_laneq_u32(__s2_437, __p3_437); \ - __ret_437; \ -}) -#else -#define vmla_laneq_u32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ - uint32x2_t __ret_438; \ - uint32x2_t __s0_438 = __p0_438; \ - uint32x2_t __s1_438 = __p1_438; \ - uint32x4_t __s2_438 = __p2_438; \ - uint32x2_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \ - uint32x2_t __rev1_438; __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \ - uint32x4_t __rev2_438; __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \ - __ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_u32(__rev2_438, __p3_438); \ - __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \ - __ret_438; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u16(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ - uint16x4_t __ret_439; \ - uint16x4_t __s0_439 = __p0_439; \ - uint16x4_t __s1_439 = __p1_439; \ - uint16x8_t __s2_439 = __p2_439; \ - __ret_439 = __s0_439 + __s1_439 * splat_laneq_u16(__s2_439, __p3_439); \ - __ret_439; \ -}) -#else -#define vmla_laneq_u16(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ - uint16x4_t __ret_440; \ - uint16x4_t __s0_440 = __p0_440; \ - uint16x4_t __s1_440 = __p1_440; \ - uint16x8_t __s2_440 = __p2_440; \ - uint16x4_t __rev0_440; __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 3, 2, 1, 0); \ - uint16x4_t __rev1_440; __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 3, 2, 1, 0); \ - uint16x8_t __rev2_440; __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_u16(__rev2_440, __p3_440); \ - __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 3, 2, 1, 0); \ - __ret_440; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_f32(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ - float32x2_t __ret_441; \ - float32x2_t __s0_441 = __p0_441; \ - float32x2_t __s1_441 = __p1_441; \ - float32x4_t __s2_441 = __p2_441; \ - __ret_441 = __s0_441 + __s1_441 * splat_laneq_f32(__s2_441, __p3_441); \ - __ret_441; \ -}) -#else -#define vmla_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ - float32x2_t __ret_442; \ - float32x2_t __s0_442 = __p0_442; \ - float32x2_t __s1_442 = __p1_442; \ - float32x4_t __s2_442 = __p2_442; \ - float32x2_t __rev0_442; __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 1, 0); \ - float32x2_t __rev1_442; __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 1, 0); \ - float32x4_t __rev2_442; __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 3, 2, 1, 0); \ - __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_f32(__rev2_442, __p3_442); \ - __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 1, 0); \ - __ret_442; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ - int32x2_t __ret_443; \ - int32x2_t 
__s0_443 = __p0_443; \ - int32x2_t __s1_443 = __p1_443; \ - int32x4_t __s2_443 = __p2_443; \ - __ret_443 = __s0_443 + __s1_443 * splat_laneq_s32(__s2_443, __p3_443); \ - __ret_443; \ -}) -#else -#define vmla_laneq_s32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ - int32x2_t __ret_444; \ - int32x2_t __s0_444 = __p0_444; \ - int32x2_t __s1_444 = __p1_444; \ - int32x4_t __s2_444 = __p2_444; \ - int32x2_t __rev0_444; __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \ - int32x2_t __rev1_444; __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 1, 0); \ - int32x4_t __rev2_444; __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 3, 2, 1, 0); \ - __ret_444 = __rev0_444 + __rev1_444 * __noswap_splat_laneq_s32(__rev2_444, __p3_444); \ - __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \ - __ret_444; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ - int16x4_t __ret_445; \ - int16x4_t __s0_445 = __p0_445; \ - int16x4_t __s1_445 = __p1_445; \ - int16x8_t __s2_445 = __p2_445; \ - __ret_445 = __s0_445 + __s1_445 * splat_laneq_s16(__s2_445, __p3_445); \ - __ret_445; \ -}) -#else -#define vmla_laneq_s16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ - int16x4_t __ret_446; \ - int16x4_t __s0_446 = __p0_446; \ - int16x4_t __s1_446 = __p1_446; \ - int16x8_t __s2_446 = __p2_446; \ - int16x4_t __rev0_446; __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \ - int16x4_t __rev1_446; __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 3, 2, 1, 0); \ - int16x8_t __rev2_446; __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_446 = __rev0_446 + __rev1_446 * __noswap_splat_laneq_s16(__rev2_446, __p3_446); \ - __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \ - __ret_446; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ - uint64x2_t __ret_447; \ - uint64x2_t __s0_447 = __p0_447; \ - uint32x4_t __s1_447 = __p1_447; \ - uint32x2_t __s2_447 = __p2_447; \ - __ret_447 = __s0_447 + vmull_u32(vget_high_u32(__s1_447), splat_lane_u32(__s2_447, __p3_447)); \ - __ret_447; \ -}) -#else -#define vmlal_high_lane_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ - uint64x2_t __ret_448; \ - uint64x2_t __s0_448 = __p0_448; \ +#define vmlaq_laneq_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ + uint32x4_t __ret_448; \ + uint32x4_t __s0_448 = __p0_448; \ uint32x4_t __s1_448 = __p1_448; \ - uint32x2_t __s2_448 = __p2_448; \ - uint64x2_t __rev0_448; __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \ - uint32x4_t __rev1_448; __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \ - uint32x2_t __rev2_448; __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \ - __ret_448 = __rev0_448 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_448), __noswap_splat_lane_u32(__rev2_448, __p3_448)); \ - __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \ + uint32x4_t __s2_448 = __p2_448; \ + __ret_448 = __s0_448 + __s1_448 * splatq_laneq_u32(__s2_448, __p3_448); \ __ret_448; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ +#else +#define vmlaq_laneq_u32(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ uint32x4_t __ret_449; \ uint32x4_t __s0_449 = __p0_449; \ - uint16x8_t 
__s1_449 = __p1_449; \ - uint16x4_t __s2_449 = __p2_449; \ - __ret_449 = __s0_449 + vmull_u16(vget_high_u16(__s1_449), splat_lane_u16(__s2_449, __p3_449)); \ + uint32x4_t __s1_449 = __p1_449; \ + uint32x4_t __s2_449 = __p2_449; \ + uint32x4_t __rev0_449; __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \ + uint32x4_t __rev1_449; __rev1_449 = __builtin_shufflevector(__s1_449, __s1_449, 3, 2, 1, 0); \ + uint32x4_t __rev2_449; __rev2_449 = __builtin_shufflevector(__s2_449, __s2_449, 3, 2, 1, 0); \ + __ret_449 = __rev0_449 + __rev1_449 * __noswap_splatq_laneq_u32(__rev2_449, __p3_449); \ + __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 3, 2, 1, 0); \ __ret_449; \ }) -#else -#define vmlal_high_lane_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ - uint32x4_t __ret_450; \ - uint32x4_t __s0_450 = __p0_450; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ + uint16x8_t __ret_450; \ + uint16x8_t __s0_450 = __p0_450; \ uint16x8_t __s1_450 = __p1_450; \ - uint16x4_t __s2_450 = __p2_450; \ - uint32x4_t __rev0_450; __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \ - uint16x8_t __rev1_450; __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_450; __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \ - __ret_450 = __rev0_450 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_450), __noswap_splat_lane_u16(__rev2_450, __p3_450)); \ - __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \ + uint16x8_t __s2_450 = __p2_450; \ + __ret_450 = __s0_450 + __s1_450 * splatq_laneq_u16(__s2_450, __p3_450); \ __ret_450; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ - int64x2_t __ret_451; \ - int64x2_t __s0_451 = __p0_451; \ - int32x4_t __s1_451 = __p1_451; \ - int32x2_t __s2_451 = __p2_451; \ - __ret_451 = __s0_451 + vmull_s32(vget_high_s32(__s1_451), splat_lane_s32(__s2_451, __p3_451)); \ +#else +#define vmlaq_laneq_u16(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ + uint16x8_t __ret_451; \ + uint16x8_t __s0_451 = __p0_451; \ + uint16x8_t __s1_451 = __p1_451; \ + uint16x8_t __s2_451 = __p2_451; \ + uint16x8_t __rev0_451; __rev0_451 = __builtin_shufflevector(__s0_451, __s0_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_451; __rev1_451 = __builtin_shufflevector(__s1_451, __s1_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_451; __rev2_451 = __builtin_shufflevector(__s2_451, __s2_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_451 = __rev0_451 + __rev1_451 * __noswap_splatq_laneq_u16(__rev2_451, __p3_451); \ + __ret_451 = __builtin_shufflevector(__ret_451, __ret_451, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_451; \ }) -#else -#define vmlal_high_lane_s32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \ - int64x2_t __ret_452; \ - int64x2_t __s0_452 = __p0_452; \ - int32x4_t __s1_452 = __p1_452; \ - int32x2_t __s2_452 = __p2_452; \ - int64x2_t __rev0_452; __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \ - int32x4_t __rev1_452; __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \ - int32x2_t __rev2_452; __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 1, 0); \ - __ret_452 = __rev0_452 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_452), __noswap_splat_lane_s32(__rev2_452, __p3_452)); \ - __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \ 
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_f32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \
+ float32x4_t __ret_452; \
+ float32x4_t __s0_452 = __p0_452; \
+ float32x4_t __s1_452 = __p1_452; \
+ float32x4_t __s2_452 = __p2_452; \
+ __ret_452 = __s0_452 + __s1_452 * splatq_laneq_f32(__s2_452, __p3_452); \
 __ret_452; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \
- int32x4_t __ret_453; \
- int32x4_t __s0_453 = __p0_453; \
- int16x8_t __s1_453 = __p1_453; \
- int16x4_t __s2_453 = __p2_453; \
- __ret_453 = __s0_453 + vmull_s16(vget_high_s16(__s1_453), splat_lane_s16(__s2_453, __p3_453)); \
+#else
+#define vmlaq_laneq_f32(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \
+ float32x4_t __ret_453; \
+ float32x4_t __s0_453 = __p0_453; \
+ float32x4_t __s1_453 = __p1_453; \
+ float32x4_t __s2_453 = __p2_453; \
+ float32x4_t __rev0_453; __rev0_453 = __builtin_shufflevector(__s0_453, __s0_453, 3, 2, 1, 0); \
+ float32x4_t __rev1_453; __rev1_453 = __builtin_shufflevector(__s1_453, __s1_453, 3, 2, 1, 0); \
+ float32x4_t __rev2_453; __rev2_453 = __builtin_shufflevector(__s2_453, __s2_453, 3, 2, 1, 0); \
+ __ret_453 = __rev0_453 + __rev1_453 * __noswap_splatq_laneq_f32(__rev2_453, __p3_453); \
+ __ret_453 = __builtin_shufflevector(__ret_453, __ret_453, 3, 2, 1, 0); \
 __ret_453; \
 })
-#else
-#define vmlal_high_lane_s16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s32(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \
 int32x4_t __ret_454; \
 int32x4_t __s0_454 = __p0_454; \
- int16x8_t __s1_454 = __p1_454; \
- int16x4_t __s2_454 = __p2_454; \
- int32x4_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \
- int16x8_t __rev1_454; __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2_454; __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 3, 2, 1, 0); \
- __ret_454 = __rev0_454 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_454), __noswap_splat_lane_s16(__rev2_454, __p3_454)); \
- __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \
+ int32x4_t __s1_454 = __p1_454; \
+ int32x4_t __s2_454 = __p2_454; \
+ __ret_454 = __s0_454 + __s1_454 * splatq_laneq_s32(__s2_454, __p3_454); \
 __ret_454; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \
- uint64x2_t __ret_455; \
- uint64x2_t __s0_455 = __p0_455; \
- uint32x4_t __s1_455 = __p1_455; \
- uint32x4_t __s2_455 = __p2_455; \
- __ret_455 = __s0_455 + vmull_u32(vget_high_u32(__s1_455), splat_laneq_u32(__s2_455, __p3_455)); \
+#else
+#define vmlaq_laneq_s32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \
+ int32x4_t __ret_455; \
+ int32x4_t __s0_455 = __p0_455; \
+ int32x4_t __s1_455 = __p1_455; \
+ int32x4_t __s2_455 = __p2_455; \
+ int32x4_t __rev0_455; __rev0_455 = __builtin_shufflevector(__s0_455, __s0_455, 3, 2, 1, 0); \
+ int32x4_t __rev1_455; __rev1_455 = __builtin_shufflevector(__s1_455, __s1_455, 3, 2, 1, 0); \
+ int32x4_t __rev2_455; __rev2_455 = __builtin_shufflevector(__s2_455, __s2_455, 3, 2, 1, 0); \
+ __ret_455 = __rev0_455 + __rev1_455 * __noswap_splatq_laneq_s32(__rev2_455, __p3_455); \
+ __ret_455 = __builtin_shufflevector(__ret_455, __ret_455, 3, 2, 1, 0); \
 __ret_455; \
 })
-#else
-#define vmlal_high_laneq_u32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \
- uint64x2_t __ret_456; \
- uint64x2_t __s0_456 = __p0_456; \
- uint32x4_t __s1_456 = __p1_456; \
- uint32x4_t __s2_456 = __p2_456; \
- uint64x2_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \
- uint32x4_t __rev1_456; __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \
- uint32x4_t __rev2_456; __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \
- __ret_456 = __rev0_456 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_456), __noswap_splat_laneq_u32(__rev2_456, __p3_456)); \
- __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_laneq_s16(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \
+ int16x8_t __ret_456; \
+ int16x8_t __s0_456 = __p0_456; \
+ int16x8_t __s1_456 = __p1_456; \
+ int16x8_t __s2_456 = __p2_456; \
+ __ret_456 = __s0_456 + __s1_456 * splatq_laneq_s16(__s2_456, __p3_456); \
 __ret_456; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \
- uint32x4_t __ret_457; \
- uint32x4_t __s0_457 = __p0_457; \
- uint16x8_t __s1_457 = __p1_457; \
- uint16x8_t __s2_457 = __p2_457; \
- __ret_457 = __s0_457 + vmull_u16(vget_high_u16(__s1_457), splat_laneq_u16(__s2_457, __p3_457)); \
+#else
+#define vmlaq_laneq_s16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \
+ int16x8_t __ret_457; \
+ int16x8_t __s0_457 = __p0_457; \
+ int16x8_t __s1_457 = __p1_457; \
+ int16x8_t __s2_457 = __p2_457; \
+ int16x8_t __rev0_457; __rev0_457 = __builtin_shufflevector(__s0_457, __s0_457, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_457; __rev1_457 = __builtin_shufflevector(__s1_457, __s1_457, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2_457; __rev2_457 = __builtin_shufflevector(__s2_457, __s2_457, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_457 = __rev0_457 + __rev1_457 * __noswap_splatq_laneq_s16(__rev2_457, __p3_457); \
+ __ret_457 = __builtin_shufflevector(__ret_457, __ret_457, 7, 6, 5, 4, 3, 2, 1, 0); \
 __ret_457; \
 })
-#else
-#define vmlal_high_laneq_u16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \
- uint32x4_t __ret_458; \
- uint32x4_t __s0_458 = __p0_458; \
- uint16x8_t __s1_458 = __p1_458; \
- uint16x8_t __s2_458 = __p2_458; \
- uint32x4_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \
- uint16x8_t __rev1_458; __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2_458; __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_458 = __rev0_458 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_458), __noswap_splat_laneq_u16(__rev2_458, __p3_458)); \
- __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u32(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \
+ uint32x2_t __ret_458; \
+ uint32x2_t __s0_458 = __p0_458; \
+ uint32x2_t __s1_458 = __p1_458; \
+ uint32x4_t __s2_458 = __p2_458; \
+ __ret_458 = __s0_458 + __s1_458 * splat_laneq_u32(__s2_458, __p3_458); \
 __ret_458; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \
- int64x2_t __ret_459; \
- int64x2_t __s0_459 = __p0_459; \
- int32x4_t __s1_459 = __p1_459; \
- int32x4_t __s2_459 = __p2_459; \
- __ret_459 = __s0_459 + vmull_s32(vget_high_s32(__s1_459), splat_laneq_s32(__s2_459, __p3_459)); \
+#else
+#define vmla_laneq_u32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \
+ uint32x2_t __ret_459; \
+ uint32x2_t __s0_459 = __p0_459; \
+ uint32x2_t __s1_459 = __p1_459; \
+ uint32x4_t __s2_459 = __p2_459; \
+ uint32x2_t __rev0_459; __rev0_459 = __builtin_shufflevector(__s0_459, __s0_459, 1, 0); \
+ uint32x2_t __rev1_459; __rev1_459 = __builtin_shufflevector(__s1_459, __s1_459, 1, 0); \
+ uint32x4_t __rev2_459; __rev2_459 = __builtin_shufflevector(__s2_459, __s2_459, 3, 2, 1, 0); \
+ __ret_459 = __rev0_459 + __rev1_459 * __noswap_splat_laneq_u32(__rev2_459, __p3_459); \
+ __ret_459 = __builtin_shufflevector(__ret_459, __ret_459, 1, 0); \
 __ret_459; \
 })
-#else
-#define vmlal_high_laneq_s32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \
- int64x2_t __ret_460; \
- int64x2_t __s0_460 = __p0_460; \
- int32x4_t __s1_460 = __p1_460; \
- int32x4_t __s2_460 = __p2_460; \
- int64x2_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
- int32x4_t __rev1_460; __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 3, 2, 1, 0); \
- int32x4_t __rev2_460; __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \
- __ret_460 = __rev0_460 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_460), __noswap_splat_laneq_s32(__rev2_460, __p3_460)); \
- __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_u16(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \
+ uint16x4_t __ret_460; \
+ uint16x4_t __s0_460 = __p0_460; \
+ uint16x4_t __s1_460 = __p1_460; \
+ uint16x8_t __s2_460 = __p2_460; \
+ __ret_460 = __s0_460 + __s1_460 * splat_laneq_u16(__s2_460, __p3_460); \
 __ret_460; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \
- int32x4_t __ret_461; \
- int32x4_t __s0_461 = __p0_461; \
- int16x8_t __s1_461 = __p1_461; \
- int16x8_t __s2_461 = __p2_461; \
- __ret_461 = __s0_461 + vmull_s16(vget_high_s16(__s1_461), splat_laneq_s16(__s2_461, __p3_461)); \
+#else
+#define vmla_laneq_u16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \
+ uint16x4_t __ret_461; \
+ uint16x4_t __s0_461 = __p0_461; \
+ uint16x4_t __s1_461 = __p1_461; \
+ uint16x8_t __s2_461 = __p2_461; \
+ uint16x4_t __rev0_461; __rev0_461 = __builtin_shufflevector(__s0_461, __s0_461, 3, 2, 1, 0); \
+ uint16x4_t __rev1_461; __rev1_461 = __builtin_shufflevector(__s1_461, __s1_461, 3, 2, 1, 0); \
+ uint16x8_t __rev2_461; __rev2_461 = __builtin_shufflevector(__s2_461, __s2_461, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_461 = __rev0_461 + __rev1_461 * __noswap_splat_laneq_u16(__rev2_461, __p3_461); \
+ __ret_461 = __builtin_shufflevector(__ret_461, __ret_461, 3, 2, 1, 0); \
 __ret_461; \
 })
-#else
-#define vmlal_high_laneq_s16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \
- int32x4_t __ret_462; \
- int32x4_t __s0_462 = __p0_462; \
- int16x8_t __s1_462 = __p1_462; \
- int16x8_t __s2_462 = __p2_462; \
- int32x4_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
- int16x8_t __rev1_462; __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2_462; __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_462 = __rev0_462 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_462), __noswap_splat_laneq_s16(__rev2_462, __p3_462)); \
- __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_f32(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \
+ float32x2_t __ret_462; \
+ float32x2_t __s0_462 = __p0_462; \
+ float32x2_t __s1_462 = __p1_462; \
+ float32x4_t __s2_462 = __p2_462; \
+ __ret_462 = __s0_462 + __s1_462 * splat_laneq_f32(__s2_462, __p3_462); \
 __ret_462; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \
- uint64x2_t __ret_463; \
- uint64x2_t __s0_463 = __p0_463; \
- uint32x2_t __s1_463 = __p1_463; \
- uint32x4_t __s2_463 = __p2_463; \
- __ret_463 = __s0_463 + vmull_u32(__s1_463, splat_laneq_u32(__s2_463, __p3_463)); \
+#else
+#define vmla_laneq_f32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \
+ float32x2_t __ret_463; \
+ float32x2_t __s0_463 = __p0_463; \
+ float32x2_t __s1_463 = __p1_463; \
+ float32x4_t __s2_463 = __p2_463; \
+ float32x2_t __rev0_463; __rev0_463 = __builtin_shufflevector(__s0_463, __s0_463, 1, 0); \
+ float32x2_t __rev1_463; __rev1_463 = __builtin_shufflevector(__s1_463, __s1_463, 1, 0); \
+ float32x4_t __rev2_463; __rev2_463 = __builtin_shufflevector(__s2_463, __s2_463, 3, 2, 1, 0); \
+ __ret_463 = __rev0_463 + __rev1_463 * __noswap_splat_laneq_f32(__rev2_463, __p3_463); \
+ __ret_463 = __builtin_shufflevector(__ret_463, __ret_463, 1, 0); \
 __ret_463; \
 })
-#else
-#define vmlal_laneq_u32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \
- uint64x2_t __ret_464; \
- uint64x2_t __s0_464 = __p0_464; \
- uint32x2_t __s1_464 = __p1_464; \
- uint32x4_t __s2_464 = __p2_464; \
- uint64x2_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \
- uint32x2_t __rev1_464; __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \
- uint32x4_t __rev2_464; __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \
- __ret_464 = __rev0_464 + __noswap_vmull_u32(__rev1_464, __noswap_splat_laneq_u32(__rev2_464, __p3_464)); \
- __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \
+ int32x2_t __ret_464; \
+ int32x2_t __s0_464 = __p0_464; \
+ int32x2_t __s1_464 = __p1_464; \
+ int32x4_t __s2_464 = __p2_464; \
+ __ret_464 = __s0_464 + __s1_464 * splat_laneq_s32(__s2_464, __p3_464); \
 __ret_464; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \
- uint32x4_t __ret_465; \
- uint32x4_t __s0_465 = __p0_465; \
- uint16x4_t __s1_465 = __p1_465; \
- uint16x8_t __s2_465 = __p2_465; \
- __ret_465 = __s0_465 + vmull_u16(__s1_465, splat_laneq_u16(__s2_465, __p3_465)); \
+#else
+#define vmla_laneq_s32(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \
+ int32x2_t __ret_465; \
+ int32x2_t __s0_465 = __p0_465; \
+ int32x2_t __s1_465 = __p1_465; \
+ int32x4_t __s2_465 = __p2_465; \
+ int32x2_t __rev0_465; __rev0_465 = __builtin_shufflevector(__s0_465, __s0_465, 1, 0); \
+ int32x2_t __rev1_465; __rev1_465 = __builtin_shufflevector(__s1_465, __s1_465, 1, 0); \
+ int32x4_t __rev2_465; __rev2_465 = __builtin_shufflevector(__s2_465, __s2_465, 3, 2, 1, 0); \
+ __ret_465 = __rev0_465 + __rev1_465 * __noswap_splat_laneq_s32(__rev2_465, __p3_465); \
+ __ret_465 = __builtin_shufflevector(__ret_465, __ret_465, 1, 0); \
 __ret_465; \
 })
-#else
-#define vmlal_laneq_u16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \
- uint32x4_t __ret_466; \
- uint32x4_t __s0_466 = __p0_466; \
- uint16x4_t __s1_466 = __p1_466; \
- uint16x8_t __s2_466 = __p2_466; \
- uint32x4_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
- uint16x4_t __rev1_466; __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \
- uint16x8_t __rev2_466; __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_466 = __rev0_466 + __noswap_vmull_u16(__rev1_466, __noswap_splat_laneq_u16(__rev2_466, __p3_466)); \
- __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_laneq_s16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \
+ int16x4_t __ret_466; \
+ int16x4_t __s0_466 = __p0_466; \
+ int16x4_t __s1_466 = __p1_466; \
+ int16x8_t __s2_466 = __p2_466; \
+ __ret_466 = __s0_466 + __s1_466 * splat_laneq_s16(__s2_466, __p3_466); \
 __ret_466; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \
- int64x2_t __ret_467; \
- int64x2_t __s0_467 = __p0_467; \
- int32x2_t __s1_467 = __p1_467; \
- int32x4_t __s2_467 = __p2_467; \
- __ret_467 = __s0_467 + vmull_s32(__s1_467, splat_laneq_s32(__s2_467, __p3_467)); \
+#else
+#define vmla_laneq_s16(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \
+ int16x4_t __ret_467; \
+ int16x4_t __s0_467 = __p0_467; \
+ int16x4_t __s1_467 = __p1_467; \
+ int16x8_t __s2_467 = __p2_467; \
+ int16x4_t __rev0_467; __rev0_467 = __builtin_shufflevector(__s0_467, __s0_467, 3, 2, 1, 0); \
+ int16x4_t __rev1_467; __rev1_467 = __builtin_shufflevector(__s1_467, __s1_467, 3, 2, 1, 0); \
+ int16x8_t __rev2_467; __rev2_467 = __builtin_shufflevector(__s2_467, __s2_467, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_467 = __rev0_467 + __rev1_467 * __noswap_splat_laneq_s16(__rev2_467, __p3_467); \
+ __ret_467 = __builtin_shufflevector(__ret_467, __ret_467, 3, 2, 1, 0); \
 __ret_467; \
 })
-#else
-#define vmlal_laneq_s32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \
- int64x2_t __ret_468; \
- int64x2_t __s0_468 = __p0_468; \
- int32x2_t __s1_468 = __p1_468; \
- int32x4_t __s2_468 = __p2_468; \
- int64x2_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \
- int32x2_t __rev1_468; __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 1, 0); \
- int32x4_t __rev2_468; __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \
- __ret_468 = __rev0_468 + __noswap_vmull_s32(__rev1_468, __noswap_splat_laneq_s32(__rev2_468, __p3_468)); \
- __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \
+ uint64x2_t __ret_468; \
+ uint64x2_t __s0_468 = __p0_468; \
+ uint32x4_t __s1_468 = __p1_468; \
+ uint32x2_t __s2_468 = __p2_468; \
+ __ret_468 = __s0_468 + vmull_u32(vget_high_u32(__s1_468), splat_lane_u32(__s2_468, __p3_468)); \
 __ret_468; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \
- int32x4_t __ret_469; \
- int32x4_t __s0_469 = __p0_469; \
- int16x4_t __s1_469 = __p1_469; \
- int16x8_t __s2_469 = __p2_469; \
- __ret_469 = __s0_469 + vmull_s16(__s1_469, splat_laneq_s16(__s2_469, __p3_469)); \
+#else
+#define vmlal_high_lane_u32(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \
+ uint64x2_t __ret_469; \
+ uint64x2_t __s0_469 = __p0_469; \
+ uint32x4_t __s1_469 = __p1_469; \
+ uint32x2_t __s2_469 = __p2_469; \
+ uint64x2_t __rev0_469; __rev0_469 = __builtin_shufflevector(__s0_469, __s0_469, 1, 0); \
+ uint32x4_t __rev1_469; __rev1_469 = __builtin_shufflevector(__s1_469, __s1_469, 3, 2, 1, 0); \
+ uint32x2_t __rev2_469; __rev2_469 = __builtin_shufflevector(__s2_469, __s2_469, 1, 0); \
+ __ret_469 = __rev0_469 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_469), __noswap_splat_lane_u32(__rev2_469, __p3_469)); \
+ __ret_469 = __builtin_shufflevector(__ret_469, __ret_469, 1, 0); \
 __ret_469; \
 })
-#else
-#define vmlal_laneq_s16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \
- int32x4_t __ret_470; \
- int32x4_t __s0_470 = __p0_470; \
- int16x4_t __s1_470 = __p1_470; \
- int16x8_t __s2_470 = __p2_470; \
- int32x4_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 3, 2, 1, 0); \
- int16x4_t __rev1_470; __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 3, 2, 1, 0); \
- int16x8_t __rev2_470; __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_470 = __rev0_470 + __noswap_vmull_s16(__rev1_470, __noswap_splat_laneq_s16(__rev2_470, __p3_470)); \
- __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_u16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \
+ uint32x4_t __ret_470; \
+ uint32x4_t __s0_470 = __p0_470; \
+ uint16x8_t __s1_470 = __p1_470; \
+ uint16x4_t __s2_470 = __p2_470; \
+ __ret_470 = __s0_470 + vmull_u16(vget_high_u16(__s1_470), splat_lane_u16(__s2_470, __p3_470)); \
 __ret_470; \
 })
+#else
+#define vmlal_high_lane_u16(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \
+ uint32x4_t __ret_471; \
+ uint32x4_t __s0_471 = __p0_471; \
+ uint16x8_t __s1_471 = __p1_471; \
+ uint16x4_t __s2_471 = __p2_471; \
+ uint32x4_t __rev0_471; __rev0_471 = __builtin_shufflevector(__s0_471, __s0_471, 3, 2, 1, 0); \
+ uint16x8_t __rev1_471; __rev1_471 = __builtin_shufflevector(__s1_471, __s1_471, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2_471; __rev2_471 = __builtin_shufflevector(__s2_471, __s2_471, 3, 2, 1, 0); \
+ __ret_471 = __rev0_471 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_471), __noswap_splat_lane_u16(__rev2_471, __p3_471)); \
+ __ret_471 = __builtin_shufflevector(__ret_471, __ret_471, 3, 2, 1, 0); \
+ __ret_471; \
+})
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+#define vmlal_high_lane_s32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \
+ int64x2_t __ret_472; \
+ int64x2_t __s0_472 = __p0_472; \
+ int32x4_t __s1_472 = __p1_472; \
+ int32x2_t __s2_472 = __p2_472; \
+ __ret_472 = __s0_472 + vmull_s32(vget_high_s32(__s1_472), splat_lane_s32(__s2_472, __p3_472)); \
+ __ret_472; \
+})
+#else
+#define vmlal_high_lane_s32(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \
+ int64x2_t __ret_473; \
+ int64x2_t __s0_473 = __p0_473; \
+ int32x4_t __s1_473 = __p1_473; \
+ int32x2_t __s2_473 = __p2_473; \
+ int64x2_t __rev0_473; __rev0_473 = __builtin_shufflevector(__s0_473, __s0_473, 1, 0); \
+ int32x4_t __rev1_473; __rev1_473 = __builtin_shufflevector(__s1_473, __s1_473, 3, 2, 1, 0); \
+ int32x2_t __rev2_473; __rev2_473 = __builtin_shufflevector(__s2_473, __s2_473, 1, 0); \
+ __ret_473 = __rev0_473 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_473), __noswap_splat_lane_s32(__rev2_473, __p3_473)); \
+ __ret_473 = __builtin_shufflevector(__ret_473, __ret_473, 1, 0); \
+ __ret_473; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_lane_s16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \
+ int32x4_t __ret_474; \
+ int32x4_t __s0_474 = __p0_474; \
+ int16x8_t __s1_474 = __p1_474; \
+ int16x4_t __s2_474 = __p2_474; \
+ __ret_474 = __s0_474 + vmull_s16(vget_high_s16(__s1_474), splat_lane_s16(__s2_474, __p3_474)); \
+ __ret_474; \
+})
+#else
+#define vmlal_high_lane_s16(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \
+ int32x4_t __ret_475; \
+ int32x4_t __s0_475 = __p0_475; \
+ int16x8_t __s1_475 = __p1_475; \
+ int16x4_t __s2_475 = __p2_475; \
+ int32x4_t __rev0_475; __rev0_475 = __builtin_shufflevector(__s0_475, __s0_475, 3, 2, 1, 0); \
+ int16x8_t __rev1_475; __rev1_475 = __builtin_shufflevector(__s1_475, __s1_475, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2_475; __rev2_475 = __builtin_shufflevector(__s2_475, __s2_475, 3, 2, 1, 0); \
+ __ret_475 = __rev0_475 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_475), __noswap_splat_lane_s16(__rev2_475, __p3_475)); \
+ __ret_475 = __builtin_shufflevector(__ret_475, __ret_475, 3, 2, 1, 0); \
+ __ret_475; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \
+ uint64x2_t __ret_476; \
+ uint64x2_t __s0_476 = __p0_476; \
+ uint32x4_t __s1_476 = __p1_476; \
+ uint32x4_t __s2_476 = __p2_476; \
+ __ret_476 = __s0_476 + vmull_u32(vget_high_u32(__s1_476), splat_laneq_u32(__s2_476, __p3_476)); \
+ __ret_476; \
+})
+#else
+#define vmlal_high_laneq_u32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \
+ uint64x2_t __ret_477; \
+ uint64x2_t __s0_477 = __p0_477; \
+ uint32x4_t __s1_477 = __p1_477; \
+ uint32x4_t __s2_477 = __p2_477; \
+ uint64x2_t __rev0_477; __rev0_477 = __builtin_shufflevector(__s0_477, __s0_477, 1, 0); \
+ uint32x4_t __rev1_477; __rev1_477 = __builtin_shufflevector(__s1_477, __s1_477, 3, 2, 1, 0); \
+ uint32x4_t __rev2_477; __rev2_477 = __builtin_shufflevector(__s2_477, __s2_477, 3, 2, 1, 0); \
+ __ret_477 = __rev0_477 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_477), __noswap_splat_laneq_u32(__rev2_477, __p3_477)); \
+ __ret_477 = __builtin_shufflevector(__ret_477, __ret_477, 1, 0); \
+ __ret_477; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_u16(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \
+ uint32x4_t __ret_478; \
+ uint32x4_t __s0_478 = __p0_478; \
+ uint16x8_t __s1_478 = __p1_478; \
+ uint16x8_t __s2_478 = __p2_478; \
+ __ret_478 = __s0_478 + vmull_u16(vget_high_u16(__s1_478), splat_laneq_u16(__s2_478, __p3_478)); \
+ __ret_478; \
+})
+#else
+#define vmlal_high_laneq_u16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \
+ uint32x4_t __ret_479; \
+ uint32x4_t __s0_479 = __p0_479; \
+ uint16x8_t __s1_479 = __p1_479; \
+ uint16x8_t __s2_479 = __p2_479; \
+ uint32x4_t __rev0_479; __rev0_479 = __builtin_shufflevector(__s0_479, __s0_479, 3, 2, 1, 0); \
+ uint16x8_t __rev1_479; __rev1_479 = __builtin_shufflevector(__s1_479, __s1_479, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2_479; __rev2_479 = __builtin_shufflevector(__s2_479, __s2_479, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_479 = __rev0_479 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_479), __noswap_splat_laneq_u16(__rev2_479, __p3_479)); \
+ __ret_479 = __builtin_shufflevector(__ret_479, __ret_479, 3, 2, 1, 0); \
+ __ret_479; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s32(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \
+ int64x2_t __ret_480; \
+ int64x2_t __s0_480 = __p0_480; \
+ int32x4_t __s1_480 = __p1_480; \
+ int32x4_t __s2_480 = __p2_480; \
+ __ret_480 = __s0_480 + vmull_s32(vget_high_s32(__s1_480), splat_laneq_s32(__s2_480, __p3_480)); \
+ __ret_480; \
+})
+#else
+#define vmlal_high_laneq_s32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \
+ int64x2_t __ret_481; \
+ int64x2_t __s0_481 = __p0_481; \
+ int32x4_t __s1_481 = __p1_481; \
+ int32x4_t __s2_481 = __p2_481; \
+ int64x2_t __rev0_481; __rev0_481 = __builtin_shufflevector(__s0_481, __s0_481, 1, 0); \
+ int32x4_t __rev1_481; __rev1_481 = __builtin_shufflevector(__s1_481, __s1_481, 3, 2, 1, 0); \
+ int32x4_t __rev2_481; __rev2_481 = __builtin_shufflevector(__s2_481, __s2_481, 3, 2, 1, 0); \
+ __ret_481 = __rev0_481 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_481), __noswap_splat_laneq_s32(__rev2_481, __p3_481)); \
+ __ret_481 = __builtin_shufflevector(__ret_481, __ret_481, 1, 0); \
+ __ret_481; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_high_laneq_s16(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \
+ int32x4_t __ret_482; \
+ int32x4_t __s0_482 = __p0_482; \
+ int16x8_t __s1_482 = __p1_482; \
+ int16x8_t __s2_482 = __p2_482; \
+ __ret_482 = __s0_482 + vmull_s16(vget_high_s16(__s1_482), splat_laneq_s16(__s2_482, __p3_482)); \
+ __ret_482; \
+})
+#else
+#define vmlal_high_laneq_s16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \
+ int32x4_t __ret_483; \
+ int32x4_t __s0_483 = __p0_483; \
+ int16x8_t __s1_483 = __p1_483; \
+ int16x8_t __s2_483 = __p2_483; \
+ int32x4_t __rev0_483; __rev0_483 = __builtin_shufflevector(__s0_483, __s0_483, 3, 2, 1, 0); \
+ int16x8_t __rev1_483; __rev1_483 = __builtin_shufflevector(__s1_483, __s1_483, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2_483; __rev2_483 = __builtin_shufflevector(__s2_483, __s2_483, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_483 = __rev0_483 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_483), __noswap_splat_laneq_s16(__rev2_483, __p3_483)); \
+ __ret_483 = __builtin_shufflevector(__ret_483, __ret_483, 3, 2, 1, 0); \
+ __ret_483; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \
+ uint64x2_t __ret_484; \
+ uint64x2_t __s0_484 = __p0_484; \
+ uint32x2_t __s1_484 = __p1_484; \
+ uint32x4_t __s2_484 = __p2_484; \
+ __ret_484 = __s0_484 + vmull_u32(__s1_484, splat_laneq_u32(__s2_484, __p3_484)); \
+ __ret_484; \
+})
+#else
+#define vmlal_laneq_u32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \
+ uint64x2_t __ret_485; \
+ uint64x2_t __s0_485 = __p0_485; \
+ uint32x2_t __s1_485 = __p1_485; \
+ uint32x4_t __s2_485 = __p2_485; \
+ uint64x2_t __rev0_485; __rev0_485 = __builtin_shufflevector(__s0_485, __s0_485, 1, 0); \
+ uint32x2_t __rev1_485; __rev1_485 = __builtin_shufflevector(__s1_485, __s1_485, 1, 0); \
+ uint32x4_t __rev2_485; __rev2_485 = __builtin_shufflevector(__s2_485, __s2_485, 3, 2, 1, 0); \
+ __ret_485 = __rev0_485 + __noswap_vmull_u32(__rev1_485, __noswap_splat_laneq_u32(__rev2_485, __p3_485)); \
+ __ret_485 = __builtin_shufflevector(__ret_485, __ret_485, 1, 0); \
+ __ret_485; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_u16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \
+ uint32x4_t __ret_486; \
+ uint32x4_t __s0_486 = __p0_486; \
+ uint16x4_t __s1_486 = __p1_486; \
+ uint16x8_t __s2_486 = __p2_486; \
+ __ret_486 = __s0_486 + vmull_u16(__s1_486, splat_laneq_u16(__s2_486, __p3_486)); \
+ __ret_486; \
+})
+#else
+#define vmlal_laneq_u16(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \
+ uint32x4_t __ret_487; \
+ uint32x4_t __s0_487 = __p0_487; \
+ uint16x4_t __s1_487 = __p1_487; \
+ uint16x8_t __s2_487 = __p2_487; \
+ uint32x4_t __rev0_487; __rev0_487 = __builtin_shufflevector(__s0_487, __s0_487, 3, 2, 1, 0); \
+ uint16x4_t __rev1_487; __rev1_487 = __builtin_shufflevector(__s1_487, __s1_487, 3, 2, 1, 0); \
+ uint16x8_t __rev2_487; __rev2_487 = __builtin_shufflevector(__s2_487, __s2_487, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_487 = __rev0_487 + __noswap_vmull_u16(__rev1_487, __noswap_splat_laneq_u16(__rev2_487, __p3_487)); \
+ __ret_487 = __builtin_shufflevector(__ret_487, __ret_487, 3, 2, 1, 0); \
+ __ret_487; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \
+ int64x2_t __ret_488; \
+ int64x2_t __s0_488 = __p0_488; \
+ int32x2_t __s1_488 = __p1_488; \
+ int32x4_t __s2_488 = __p2_488; \
+ __ret_488 = __s0_488 + vmull_s32(__s1_488, splat_laneq_s32(__s2_488, __p3_488)); \
+ __ret_488; \
+})
+#else
+#define vmlal_laneq_s32(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \
+ int64x2_t __ret_489; \
+ int64x2_t __s0_489 = __p0_489; \
+ int32x2_t __s1_489 = __p1_489; \
+ int32x4_t __s2_489 = __p2_489; \
+ int64x2_t __rev0_489; __rev0_489 = __builtin_shufflevector(__s0_489, __s0_489, 1, 0); \
+ int32x2_t __rev1_489; __rev1_489 = __builtin_shufflevector(__s1_489, __s1_489, 1, 0); \
+ int32x4_t __rev2_489; __rev2_489 = __builtin_shufflevector(__s2_489, __s2_489, 3, 2, 1, 0); \
+ __ret_489 = __rev0_489 + __noswap_vmull_s32(__rev1_489, __noswap_splat_laneq_s32(__rev2_489, __p3_489)); \
+ __ret_489 = __builtin_shufflevector(__ret_489, __ret_489, 1, 0); \
+ __ret_489; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlal_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \
+ int32x4_t __ret_490; \
+ int32x4_t __s0_490 = __p0_490; \
+ int16x4_t __s1_490 = __p1_490; \
+ int16x8_t __s2_490 = __p2_490; \
+ __ret_490 = __s0_490 + vmull_s16(__s1_490, splat_laneq_s16(__s2_490, __p3_490)); \
+ __ret_490; \
+})
+#else
+#define vmlal_laneq_s16(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \
+ int32x4_t __ret_491; \
+ int32x4_t __s0_491 = __p0_491; \
+ int16x4_t __s1_491 = __p1_491; \
+ int16x8_t __s2_491 = __p2_491; \
+ int32x4_t __rev0_491; __rev0_491 = __builtin_shufflevector(__s0_491, __s0_491, 3, 2, 1, 0); \
+ int16x4_t __rev1_491; __rev1_491 = __builtin_shufflevector(__s1_491, __s1_491, 3, 2, 1, 0); \
+ int16x8_t __rev2_491; __rev2_491 = __builtin_shufflevector(__s2_491, __s2_491, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_491 = __rev0_491 + __noswap_vmull_s16(__rev1_491, __noswap_splat_laneq_s16(__rev2_491, __p3_491)); \
+ __ret_491 = __builtin_shufflevector(__ret_491, __ret_491, 3, 2, 1, 0); \
+ __ret_491; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
 float64x2_t __ret;
 __ret = __p0 - __p1 * __p2;
 return __ret;
 }
 #else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
 float64x2_t __ret;
 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -49663,552 +51409,552 @@ __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2)
 }
 #endif
 
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+__ai __attribute__((target("neon"))) float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
 float64x1_t __ret;
 __ret = __p0 - __p1 * __p2;
 return __ret;
 }
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \
- uint32x4_t __ret_471; \
- uint32x4_t __s0_471 = __p0_471; \
- uint32x4_t __s1_471 = __p1_471; \
- uint32x4_t __s2_471 = __p2_471; \
- __ret_471 = __s0_471 - __s1_471 * splatq_laneq_u32(__s2_471, __p3_471); \
- __ret_471; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \
- uint32x4_t __ret_472; \
- uint32x4_t __s0_472 = __p0_472; \
- uint32x4_t __s1_472 = __p1_472; \
- uint32x4_t __s2_472 = __p2_472; \
- uint32x4_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \
- uint32x4_t __rev1_472; __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \
- uint32x4_t __rev2_472; __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \
- __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_u32(__rev2_472, __p3_472); \
- __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \
- __ret_472; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \
- uint16x8_t __ret_473; \
- uint16x8_t __s0_473 = __p0_473; \
- uint16x8_t __s1_473 = __p1_473; \
- uint16x8_t __s2_473 = __p2_473; \
- __ret_473 = __s0_473 - __s1_473 * splatq_laneq_u16(__s2_473, __p3_473); \
- __ret_473; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \
- uint16x8_t __ret_474; \
- uint16x8_t __s0_474 = __p0_474; \
- uint16x8_t __s1_474 = __p1_474; \
- uint16x8_t __s2_474 = __p2_474; \
- uint16x8_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1_474; __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2_474; __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_u16(__rev2_474, __p3_474); \
- __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_474; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \
- float32x4_t __ret_475; \
- float32x4_t __s0_475 = __p0_475; \
- float32x4_t __s1_475 = __p1_475; \
- float32x4_t __s2_475 = __p2_475; \
- __ret_475 = __s0_475 - __s1_475 * splatq_laneq_f32(__s2_475, __p3_475); \
- __ret_475; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \
- float32x4_t __ret_476; \
- float32x4_t __s0_476 = __p0_476; \
- float32x4_t __s1_476 = __p1_476; \
- float32x4_t __s2_476 = __p2_476; \
- float32x4_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 3, 2, 1, 0); \
- float32x4_t __rev1_476; __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 3, 2, 1, 0); \
- float32x4_t __rev2_476; __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 3, 2, 1, 0); \
- __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_f32(__rev2_476, __p3_476); \
- __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 3, 2, 1, 0); \
- __ret_476; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \
- int32x4_t __ret_477; \
- int32x4_t __s0_477 = __p0_477; \
- int32x4_t __s1_477 = __p1_477; \
- int32x4_t __s2_477 = __p2_477; \
- __ret_477 = __s0_477 - __s1_477 * splatq_laneq_s32(__s2_477, __p3_477); \
- __ret_477; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \
- int32x4_t __ret_478; \
- int32x4_t __s0_478 = __p0_478; \
- int32x4_t __s1_478 = __p1_478; \
- int32x4_t __s2_478 = __p2_478; \
- int32x4_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \
- int32x4_t __rev1_478; __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 3, 2, 1, 0); \
- int32x4_t __rev2_478; __rev2_478 = __builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \
- __ret_478 = __rev0_478 - __rev1_478 * __noswap_splatq_laneq_s32(__rev2_478, __p3_478); \
- __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \
- __ret_478; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \
- int16x8_t __ret_479; \
- int16x8_t __s0_479 = __p0_479; \
- int16x8_t __s1_479 = __p1_479; \
- int16x8_t __s2_479 = __p2_479; \
- __ret_479 = __s0_479 - __s1_479 * splatq_laneq_s16(__s2_479, __p3_479); \
- __ret_479; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \
- int16x8_t __ret_480; \
- int16x8_t __s0_480 = __p0_480; \
- int16x8_t __s1_480 = __p1_480; \
- int16x8_t __s2_480 = __p2_480; \
- int16x8_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_480; __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2_480; __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_480 = __rev0_480 - __rev1_480 * __noswap_splatq_laneq_s16(__rev2_480, __p3_480); \
- __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_480; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \
- uint32x2_t __ret_481; \
- uint32x2_t __s0_481 = __p0_481; \
- uint32x2_t __s1_481 = __p1_481; \
- uint32x4_t __s2_481 = __p2_481; \
- __ret_481 = __s0_481 - __s1_481 * splat_laneq_u32(__s2_481, __p3_481); \
- __ret_481; \
-})
-#else
-#define vmls_laneq_u32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \
- uint32x2_t __ret_482; \
- uint32x2_t __s0_482 = __p0_482; \
- uint32x2_t __s1_482 = __p1_482; \
- uint32x4_t __s2_482 = __p2_482; \
- uint32x2_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
- uint32x2_t __rev1_482; __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 0); \
- uint32x4_t __rev2_482; __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \
- __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_u32(__rev2_482, __p3_482); \
- __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \
- __ret_482; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \
- uint16x4_t __ret_483; \
- uint16x4_t __s0_483 = __p0_483; \
- uint16x4_t __s1_483 = __p1_483; \
- uint16x8_t __s2_483 = __p2_483; \
- __ret_483 = __s0_483 - __s1_483 * splat_laneq_u16(__s2_483, __p3_483); \
- __ret_483; \
-})
-#else
-#define vmls_laneq_u16(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \
- uint16x4_t __ret_484; \
- uint16x4_t __s0_484 = __p0_484; \
- uint16x4_t __s1_484 = __p1_484; \
- uint16x8_t __s2_484 = __p2_484; \
- uint16x4_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \
- uint16x4_t __rev1_484; __rev1_484 = __builtin_shufflevector(__s1_484, __s1_484, 3, 2, 1, 0); \
- uint16x8_t __rev2_484; __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_u16(__rev2_484, __p3_484); \
- __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 3, 2, 1, 0); \
- __ret_484; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \
- float32x2_t __ret_485; \
- float32x2_t __s0_485 = __p0_485; \
- float32x2_t __s1_485 = __p1_485; \
- float32x4_t __s2_485 = __p2_485; \
- __ret_485 = __s0_485 - __s1_485 * splat_laneq_f32(__s2_485, __p3_485); \
- __ret_485; \
-})
-#else
-#define vmls_laneq_f32(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \
- float32x2_t __ret_486; \
- float32x2_t __s0_486 = __p0_486; \
- float32x2_t __s1_486 = __p1_486; \
- float32x4_t __s2_486 = __p2_486; \
- float32x2_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 1, 0); \
- float32x2_t __rev1_486; __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 1, 0); \
- float32x4_t __rev2_486; __rev2_486 = __builtin_shufflevector(__s2_486, __s2_486, 3, 2, 1, 0); \
- __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_f32(__rev2_486, __p3_486); \
- __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 1, 0); \
- __ret_486; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \
- int32x2_t __ret_487; \
- int32x2_t __s0_487 = __p0_487; \
- int32x2_t __s1_487 = __p1_487; \
- int32x4_t __s2_487 = __p2_487; \
- __ret_487 = __s0_487 - __s1_487 * splat_laneq_s32(__s2_487, __p3_487); \
- __ret_487; \
-})
-#else
-#define vmls_laneq_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \
- int32x2_t __ret_488; \
- int32x2_t __s0_488 = __p0_488; \
- int32x2_t __s1_488 = __p1_488; \
- int32x4_t __s2_488 = __p2_488; \
- int32x2_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \
- int32x2_t __rev1_488; __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 1, 0); \
- int32x4_t __rev2_488; __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 3, 2, 1, 0); \
- __ret_488 = __rev0_488 - __rev1_488 * __noswap_splat_laneq_s32(__rev2_488, __p3_488); \
- __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
- __ret_488; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \
- int16x4_t __ret_489; \
- int16x4_t __s0_489 = __p0_489; \
- int16x4_t __s1_489 = __p1_489; \
- int16x8_t __s2_489 = __p2_489; \
- __ret_489 = __s0_489 - __s1_489 * splat_laneq_s16(__s2_489, __p3_489); \
- __ret_489; \
-})
-#else
-#define vmls_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \
- int16x4_t __ret_490; \
- int16x4_t __s0_490 = __p0_490; \
- int16x4_t __s1_490 = __p1_490; \
- int16x8_t __s2_490 = __p2_490; \
- int16x4_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \
- int16x4_t __rev1_490; __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 3, 2, 1, 0); \
- int16x8_t __rev2_490; __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_490 = __rev0_490 - __rev1_490 * __noswap_splat_laneq_s16(__rev2_490, __p3_490); \
- __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \
- __ret_490; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \
- uint64x2_t __ret_491; \
- uint64x2_t __s0_491 = __p0_491; \
- uint32x4_t __s1_491 = __p1_491; \
- uint32x2_t __s2_491 = __p2_491; \
- __ret_491 = __s0_491 - vmull_u32(vget_high_u32(__s1_491), splat_lane_u32(__s2_491, __p3_491)); \
- __ret_491; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \
- uint64x2_t __ret_492; \
- uint64x2_t __s0_492 = __p0_492; \
+#define vmlsq_laneq_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \
+ uint32x4_t __ret_492; \
+ uint32x4_t __s0_492 = __p0_492; \
 uint32x4_t __s1_492 = __p1_492; \
- uint32x2_t __s2_492 = __p2_492; \
- uint64x2_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \
- uint32x4_t __rev1_492; __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \
- uint32x2_t __rev2_492; __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \
- __ret_492 = __rev0_492 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_492), __noswap_splat_lane_u32(__rev2_492, __p3_492)); \
- __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \
+ uint32x4_t __s2_492 = __p2_492; \
+ __ret_492 = __s0_492 - __s1_492 * splatq_laneq_u32(__s2_492, __p3_492); \
 __ret_492; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
+#else
+#define vmlsq_laneq_u32(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
 uint32x4_t __ret_493; \
 uint32x4_t __s0_493 = __p0_493; \
- uint16x8_t __s1_493 = __p1_493; \
- uint16x4_t __s2_493 = __p2_493; \
- __ret_493 = __s0_493 - vmull_u16(vget_high_u16(__s1_493), splat_lane_u16(__s2_493, __p3_493)); \
+ uint32x4_t __s1_493 = __p1_493; \
+ uint32x4_t __s2_493 = __p2_493; \
+ uint32x4_t __rev0_493; __rev0_493 = __builtin_shufflevector(__s0_493, __s0_493, 3, 2, 1, 0); \
+ uint32x4_t __rev1_493; __rev1_493 = __builtin_shufflevector(__s1_493, __s1_493, 3, 2, 1, 0); \
+ uint32x4_t __rev2_493; __rev2_493 = __builtin_shufflevector(__s2_493, __s2_493, 3, 2, 1, 0); \
+ __ret_493 = __rev0_493 - __rev1_493 * __noswap_splatq_laneq_u32(__rev2_493, __p3_493); \
+ __ret_493 = __builtin_shufflevector(__ret_493, __ret_493, 3, 2, 1, 0); \
 __ret_493; \
 })
-#else
-#define vmlsl_high_lane_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
- uint32x4_t __ret_494; \
- uint32x4_t __s0_494 = __p0_494; \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
+ uint16x8_t __ret_494; \
+ uint16x8_t __s0_494 = __p0_494; \
 uint16x8_t __s1_494 = __p1_494; \
- uint16x4_t __s2_494 = __p2_494; \
- uint32x4_t __rev0_494; __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \
- uint16x8_t __rev1_494; __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev2_494; __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \
- __ret_494 = __rev0_494 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_494), __noswap_splat_lane_u16(__rev2_494, __p3_494)); \
- __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \
+ uint16x8_t __s2_494 = __p2_494; \
+ __ret_494 = __s0_494 - __s1_494 * splatq_laneq_u16(__s2_494, __p3_494); \
 __ret_494; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
- int64x2_t __ret_495; \
- int64x2_t __s0_495 = __p0_495; \
- int32x4_t __s1_495 = __p1_495; \
- int32x2_t __s2_495 = __p2_495; \
- __ret_495 = __s0_495 - vmull_s32(vget_high_s32(__s1_495), splat_lane_s32(__s2_495, __p3_495)); \
+#else
+#define vmlsq_laneq_u16(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
+ uint16x8_t __ret_495; \
+ uint16x8_t __s0_495 = __p0_495; \
+ uint16x8_t __s1_495 = __p1_495; \
+ uint16x8_t __s2_495 = __p2_495; \
+ uint16x8_t __rev0_495; __rev0_495 = __builtin_shufflevector(__s0_495, __s0_495, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1_495; __rev1_495 = __builtin_shufflevector(__s1_495, __s1_495, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev2_495; __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_495 = __rev0_495 - __rev1_495 * __noswap_splatq_laneq_u16(__rev2_495, __p3_495); \
+ __ret_495 = __builtin_shufflevector(__ret_495, __ret_495, 7, 6, 5, 4, 3, 2, 1, 0); \
 __ret_495; \
 })
-#else
-#define vmlsl_high_lane_s32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
- int64x2_t __ret_496; \
- int64x2_t __s0_496 = __p0_496; \
- int32x4_t __s1_496 = __p1_496; \
- int32x2_t __s2_496 = __p2_496; \
- int64x2_t __rev0_496; __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \
- int32x4_t __rev1_496; __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \
- int32x2_t __rev2_496; __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 1, 0); \
- __ret_496 = __rev0_496 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_496), __noswap_splat_lane_s32(__rev2_496, __p3_496)); \
- __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_f32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
+ float32x4_t __ret_496; \
+ float32x4_t __s0_496 = __p0_496; \
+ float32x4_t __s1_496 = __p1_496; \
+ float32x4_t __s2_496 = __p2_496; \
+ __ret_496 = __s0_496 - __s1_496 * splatq_laneq_f32(__s2_496, __p3_496); \
 __ret_496; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
- int32x4_t __ret_497; \
- int32x4_t __s0_497 = __p0_497; \
- int16x8_t __s1_497 = __p1_497; \
- int16x4_t __s2_497 = __p2_497; \
- __ret_497 = __s0_497 - vmull_s16(vget_high_s16(__s1_497), splat_lane_s16(__s2_497, __p3_497)); \
+#else
+#define vmlsq_laneq_f32(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
+ float32x4_t __ret_497; \
+ float32x4_t __s0_497 = __p0_497; \
+ float32x4_t __s1_497 = __p1_497; \
+ float32x4_t __s2_497 = __p2_497; \
+ float32x4_t __rev0_497; __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 3, 2, 1, 0); \
+ float32x4_t __rev1_497; __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 3, 2, 1, 0); \
+ float32x4_t __rev2_497; __rev2_497 = __builtin_shufflevector(__s2_497, __s2_497, 3, 2, 1, 0); \
+ __ret_497 = __rev0_497 - __rev1_497 * __noswap_splatq_laneq_f32(__rev2_497, __p3_497); \
+ __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 3, 2, 1, 0); \
 __ret_497; \
 })
-#else
-#define vmlsl_high_lane_s16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
 int32x4_t __ret_498; \
 int32x4_t __s0_498 = __p0_498; \
- int16x8_t __s1_498 = __p1_498; \
- int16x4_t __s2_498 = __p2_498; \
- int32x4_t __rev0_498; __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \
- int16x8_t __rev1_498; __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2_498; __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 3, 2, 1, 0); \
- __ret_498 = __rev0_498 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_498), __noswap_splat_lane_s16(__rev2_498, __p3_498)); \
- __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \
+ int32x4_t __s1_498 = __p1_498; \
+ int32x4_t __s2_498 = __p2_498; \
+ __ret_498 = __s0_498 - __s1_498 * splatq_laneq_s32(__s2_498, __p3_498); \
 __ret_498; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
- uint64x2_t __ret_499; \
- uint64x2_t __s0_499 = __p0_499; \
- uint32x4_t __s1_499 = __p1_499; \
- uint32x4_t __s2_499 = __p2_499; \
- __ret_499 = __s0_499 - vmull_u32(vget_high_u32(__s1_499), splat_laneq_u32(__s2_499, __p3_499)); \
+#else
+#define vmlsq_laneq_s32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
+ int32x4_t __ret_499; \
+ int32x4_t __s0_499 = __p0_499; \
+ int32x4_t __s1_499 = __p1_499; \
+ int32x4_t __s2_499 = __p2_499; \
+ int32x4_t __rev0_499; __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \
+ int32x4_t __rev1_499; __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \
+ int32x4_t __rev2_499; __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 3, 2, 1, 0); \
+ __ret_499 = __rev0_499 - __rev1_499 * __noswap_splatq_laneq_s32(__rev2_499, __p3_499); \
+ __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \
 __ret_499; \
 })
-#else
-#define vmlsl_high_laneq_u32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
- uint64x2_t __ret_500; \
- uint64x2_t __s0_500 = __p0_500; \
- uint32x4_t __s1_500 = __p1_500; \
- uint32x4_t __s2_500 = __p2_500; \
- uint64x2_t __rev0_500; __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \
- uint32x4_t __rev1_500; __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \
- uint32x4_t __rev2_500; __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \
- __ret_500 = __rev0_500 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_500), __noswap_splat_laneq_u32(__rev2_500, __p3_500)); \
- __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_laneq_s16(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
+ int16x8_t __ret_500; \
+ int16x8_t __s0_500 = __p0_500; \
+ int16x8_t __s1_500 = __p1_500; \
+ int16x8_t __s2_500 = __p2_500; \
+ __ret_500 = __s0_500 - __s1_500 * splatq_laneq_s16(__s2_500, __p3_500); \
 __ret_500; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
- uint32x4_t __ret_501; \
- uint32x4_t __s0_501 = __p0_501; \
- uint16x8_t __s1_501 = __p1_501; \
- uint16x8_t __s2_501 = __p2_501; \
- __ret_501 = __s0_501 - vmull_u16(vget_high_u16(__s1_501), splat_laneq_u16(__s2_501, __p3_501)); \
+#else
+#define vmlsq_laneq_s16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
+ int16x8_t __ret_501; \
+ int16x8_t __s0_501 = __p0_501; \
+ int16x8_t __s1_501 = __p1_501; \
+ int16x8_t __s2_501 = __p2_501; \
+ int16x8_t __rev0_501; __rev0_501 = __builtin_shufflevector(__s0_501, __s0_501, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1_501; __rev1_501 = __builtin_shufflevector(__s1_501, __s1_501, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev2_501; __rev2_501 = __builtin_shufflevector(__s2_501, __s2_501, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_501 = __rev0_501 - __rev1_501 * __noswap_splatq_laneq_s16(__rev2_501, __p3_501); \
+ __ret_501 = __builtin_shufflevector(__ret_501, __ret_501, 7, 6, 5, 4, 3, 2, 1, 0); \
 __ret_501; \
 })
-#else
-#define vmlsl_high_laneq_u16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
- uint32x4_t __ret_502; \
- uint32x4_t __s0_502 = __p0_502; \
- uint16x8_t __s1_502 = __p1_502; \
- uint16x8_t __s2_502 = __p2_502; \
- uint32x4_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \
- uint16x8_t __rev1_502; __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2_502; __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_502 = __rev0_502 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_502), __noswap_splat_laneq_u16(__rev2_502, __p3_502)); \
- __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
+ uint32x2_t __ret_502; \
+ uint32x2_t __s0_502 = __p0_502; \
+ uint32x2_t __s1_502 = __p1_502; \
+ uint32x4_t __s2_502 = __p2_502; \
+ __ret_502 = __s0_502 - __s1_502 * splat_laneq_u32(__s2_502, __p3_502); \
 __ret_502; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
- int64x2_t __ret_503; \
- int64x2_t __s0_503 = __p0_503; \
- int32x4_t __s1_503 = __p1_503; \
- int32x4_t __s2_503 = __p2_503; \
- __ret_503 = __s0_503 - vmull_s32(vget_high_s32(__s1_503), splat_laneq_s32(__s2_503, __p3_503)); \
+#else
+#define vmls_laneq_u32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
+ uint32x2_t __ret_503; \
+ uint32x2_t __s0_503 = __p0_503; \
+ uint32x2_t __s1_503 = __p1_503; \
+ uint32x4_t __s2_503 = __p2_503; \
+ uint32x2_t __rev0_503; __rev0_503 = __builtin_shufflevector(__s0_503, __s0_503, 1, 0); \
+ uint32x2_t __rev1_503; __rev1_503 = __builtin_shufflevector(__s1_503, __s1_503, 1, 0); \
+ uint32x4_t __rev2_503; __rev2_503 = __builtin_shufflevector(__s2_503, __s2_503, 3, 2, 1, 0); \
+ __ret_503 = __rev0_503 - __rev1_503 * __noswap_splat_laneq_u32(__rev2_503, __p3_503); \
+ __ret_503 = __builtin_shufflevector(__ret_503, __ret_503, 1, 0); \
 __ret_503; \
 })
-#else
-#define vmlsl_high_laneq_s32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
- int64x2_t __ret_504; \
- int64x2_t __s0_504 = __p0_504; \
- int32x4_t __s1_504 = __p1_504; \
- int32x4_t __s2_504 = __p2_504; \
- int64x2_t __rev0_504; __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \
- int32x4_t __rev1_504; __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 3, 2, 1, 0); \
- int32x4_t __rev2_504; __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \
- __ret_504 = __rev0_504 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_504), __noswap_splat_laneq_s32(__rev2_504, __p3_504)); \
- __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_u16(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
+ uint16x4_t __ret_504; \
+ uint16x4_t __s0_504 = __p0_504; \
+ uint16x4_t __s1_504 = __p1_504; \
+ uint16x8_t __s2_504 = __p2_504; \
+ __ret_504 = __s0_504 - __s1_504 * splat_laneq_u16(__s2_504, __p3_504); \
 __ret_504; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
- int32x4_t __ret_505; \
- int32x4_t __s0_505 = __p0_505; \
- int16x8_t __s1_505 = __p1_505; \
- int16x8_t __s2_505 = __p2_505; \
- __ret_505 = __s0_505 - vmull_s16(vget_high_s16(__s1_505), splat_laneq_s16(__s2_505, __p3_505)); \
+#else
+#define vmls_laneq_u16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
+ uint16x4_t __ret_505; \
+ uint16x4_t __s0_505 = __p0_505; \
+ uint16x4_t __s1_505 = __p1_505; \
+ uint16x8_t __s2_505 = __p2_505; \
+ uint16x4_t __rev0_505; __rev0_505 = __builtin_shufflevector(__s0_505, __s0_505, 3, 2, 1, 0); \
+ uint16x4_t __rev1_505; __rev1_505 = __builtin_shufflevector(__s1_505, __s1_505, 3, 2, 1, 0); \
+ uint16x8_t __rev2_505; __rev2_505 = __builtin_shufflevector(__s2_505, __s2_505, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_505 = __rev0_505 - __rev1_505 * __noswap_splat_laneq_u16(__rev2_505, __p3_505); \
+ __ret_505 = __builtin_shufflevector(__ret_505, __ret_505, 3, 2, 1, 0); \
 __ret_505; \
 })
-#else
-#define vmlsl_high_laneq_s16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
- int32x4_t __ret_506; \
- int32x4_t __s0_506 = __p0_506; \
- int16x8_t __s1_506 = __p1_506; \
- int16x8_t __s2_506 = __p2_506; \
- int32x4_t __rev0_506; __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \
- int16x8_t __rev1_506; __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2_506; __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_506 = __rev0_506 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_506), __noswap_splat_laneq_s16(__rev2_506, __p3_506)); \
- __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
+ float32x2_t __ret_506; \
+ float32x2_t __s0_506 = __p0_506; \
+ float32x2_t __s1_506 = __p1_506; \
+ float32x4_t __s2_506 = __p2_506; \
+ __ret_506 = __s0_506 - __s1_506 * splat_laneq_f32(__s2_506, __p3_506); \
 __ret_506; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
- uint64x2_t __ret_507; \
- uint64x2_t __s0_507 = __p0_507; \
- uint32x2_t __s1_507 = __p1_507; \
- uint32x4_t __s2_507 = __p2_507; \
- __ret_507 = __s0_507 - vmull_u32(__s1_507, splat_laneq_u32(__s2_507, __p3_507)); \
+#else
+#define vmls_laneq_f32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
+ float32x2_t __ret_507; \
+ float32x2_t __s0_507 = __p0_507; \
+ float32x2_t __s1_507 = __p1_507; \
+ float32x4_t __s2_507 = __p2_507; \
+ float32x2_t __rev0_507; __rev0_507 = __builtin_shufflevector(__s0_507, __s0_507, 1, 0); \
+ float32x2_t __rev1_507; __rev1_507 = __builtin_shufflevector(__s1_507, __s1_507, 1, 0); \
+ float32x4_t __rev2_507; __rev2_507 = __builtin_shufflevector(__s2_507, __s2_507, 3, 2, 1, 0); \
+ __ret_507 = __rev0_507 - __rev1_507 * __noswap_splat_laneq_f32(__rev2_507, __p3_507); \
+ __ret_507 = __builtin_shufflevector(__ret_507, __ret_507, 1, 0); \
 __ret_507; \
 })
-#else
-#define vmlsl_laneq_u32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
- uint64x2_t __ret_508; \
- uint64x2_t __s0_508 = __p0_508; \
- uint32x2_t __s1_508 = __p1_508; \
- uint32x4_t __s2_508 = __p2_508; \
- uint64x2_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
- uint32x2_t __rev1_508; __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
- uint32x4_t __rev2_508; __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \
- __ret_508 = __rev0_508 - __noswap_vmull_u32(__rev1_508, __noswap_splat_laneq_u32(__rev2_508, __p3_508)); \
- __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
+ int32x2_t __ret_508; \
+ int32x2_t __s0_508 = __p0_508; \
+ int32x2_t __s1_508 = __p1_508; \
+ int32x4_t __s2_508 = __p2_508; \
+ __ret_508 = __s0_508 - __s1_508 * splat_laneq_s32(__s2_508, __p3_508); \
 __ret_508; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
- uint32x4_t __ret_509; \
- uint32x4_t __s0_509 = __p0_509; \
- uint16x4_t __s1_509 = __p1_509; \
- uint16x8_t __s2_509 = __p2_509; \
- __ret_509 = __s0_509 - vmull_u16(__s1_509, splat_laneq_u16(__s2_509, __p3_509)); \
+#else
+#define vmls_laneq_s32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+ int32x2_t __ret_509; \
+ int32x2_t __s0_509 = __p0_509; \
+ int32x2_t __s1_509 = __p1_509; \
+ int32x4_t __s2_509 = __p2_509; \
+ int32x2_t __rev0_509; __rev0_509 = __builtin_shufflevector(__s0_509, __s0_509, 1, 0); \
+ int32x2_t __rev1_509; __rev1_509 = __builtin_shufflevector(__s1_509, __s1_509, 1, 0); \
+ int32x4_t __rev2_509; __rev2_509 = __builtin_shufflevector(__s2_509, __s2_509, 3, 2, 1, 0); \
+ __ret_509 = __rev0_509 - __rev1_509 * __noswap_splat_laneq_s32(__rev2_509, __p3_509); \
+ __ret_509 = __builtin_shufflevector(__ret_509, __ret_509, 1, 0); \
 __ret_509; \
 })
-#else
-#define vmlsl_laneq_u16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
- uint32x4_t __ret_510; \
- uint32x4_t __s0_510 = __p0_510; \
- uint16x4_t __s1_510 = __p1_510; \
- uint16x8_t __s2_510 = __p2_510; \
- uint32x4_t __rev0_510; __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
- uint16x4_t __rev1_510; __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
- uint16x8_t __rev2_510; __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_510 = __rev0_510 - __noswap_vmull_u16(__rev1_510, __noswap_splat_laneq_u16(__rev2_510, __p3_510)); \
- __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_laneq_s16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+ int16x4_t __ret_510; \
+ int16x4_t __s0_510 = __p0_510; \
+ int16x4_t __s1_510 = __p1_510; \
+ int16x8_t __s2_510 = __p2_510; \
+ __ret_510 = __s0_510 - __s1_510 * splat_laneq_s16(__s2_510, __p3_510); \
 __ret_510; \
 })
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
- int64x2_t __ret_511; \
- int64x2_t __s0_511 = __p0_511; \
- int32x2_t __s1_511 = __p1_511; \
- int32x4_t __s2_511 = __p2_511; \
- __ret_511 = __s0_511 - vmull_s32(__s1_511, splat_laneq_s32(__s2_511, __p3_511)); \
+#else
+#define vmls_laneq_s16(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
+ int16x4_t __ret_511; \
+ int16x4_t __s0_511 = __p0_511; \
+ int16x4_t __s1_511 = __p1_511; \
+ int16x8_t __s2_511 = __p2_511; \
+ int16x4_t __rev0_511; __rev0_511 = __builtin_shufflevector(__s0_511, __s0_511, 3, 2, 1, 0); \
+ int16x4_t __rev1_511; __rev1_511 = __builtin_shufflevector(__s1_511, __s1_511, 3, 2, 1, 0); \
+ int16x8_t __rev2_511; __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_511 = __rev0_511 - __rev1_511 * __noswap_splat_laneq_s16(__rev2_511, __p3_511); \
+ __ret_511 = __builtin_shufflevector(__ret_511, __ret_511, 3, 2, 1, 0); \
 __ret_511; \
 })
 #else
@@ -7,19 +7,19 @@
 - __ret_512 = __rev0_512 - __noswap_vmull_s32(__rev1_512, __noswap_splat_laneq_s32(__rev2_512, __p3_512)); \
 - __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 1, 0); \
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_u32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
+ uint64x2_t __ret_512; \
+ uint64x2_t __s0_512 = __p0_512; \
+ uint32x4_t __s1_512 = __p1_512; \
+ uint32x2_t __s2_512 = __p2_512; \
+ __ret_512 = __s0_512 - vmull_u32(vget_high_u32(__s1_512), splat_lane_u32(__s2_512, __p3_512)); \
 __ret_512; \
 })
+#else
+#define vmlsl_high_lane_u32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
+ uint64x2_t __ret_513; \
+ uint64x2_t __s0_513 = __p0_513; \
+ uint32x4_t __s1_513 = __p1_513; \
+ uint32x2_t __s2_513 = __p2_513; \
+ uint64x2_t __rev0_513; __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \
+ uint32x4_t __rev1_513; __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, 3, 2, 1, 0); \
+ uint32x2_t __rev2_513; __rev2_513 = __builtin_shufflevector(__s2_513, __s2_513, 1, 0); \
+ __ret_513 = __rev0_513 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_513), __noswap_splat_lane_u32(__rev2_513, __p3_513)); \
+ __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \
+ __ret_513; \
+})
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
- int32x4_t __ret_513; \
- int32x4_t __s0_513 = __p0_513; \
- int16x4_t __s1_513 = __p1_513; \
- int16x8_t __s2_513 = __p2_513; \
- __ret_513 = __s0_513 - vmull_s16(__s1_513, splat_laneq_s16(__s2_513, __p3_513)); \
- __ret_513; \
+#define vmlsl_high_lane_u16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
+ uint32x4_t __ret_514; \
+ uint32x4_t __s0_514 = __p0_514; \
+ uint16x8_t __s1_514 = __p1_514; \
+ uint16x4_t __s2_514 = __p2_514; \
+ __ret_514 = __s0_514 - vmull_u16(vget_high_u16(__s1_514), splat_lane_u16(__s2_514, __p3_514)); \
+ __ret_514; \
 })
 #else
-#define vmlsl_laneq_s16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
- int32x4_t __ret_514; \
- int32x4_t __s0_514 = __p0_514; \
- int16x4_t __s1_514 = __p1_514; \
- int16x8_t __s2_514 = __p2_514; \
- int32x4_t __rev0_514; __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 3, 2, 1, 0); \
- int16x4_t __rev1_514; __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 3, 2, 1, 0); \
- int16x8_t __rev2_514; __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_514 = __rev0_514 - __noswap_vmull_s16(__rev1_514, __noswap_splat_laneq_s16(__rev2_514, __p3_514)); \
- __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 3, 2, 1, 0); \
- __ret_514; \
+#define vmlsl_high_lane_u16(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
+ uint32x4_t __ret_515; \
+ uint32x4_t __s0_515 = __p0_515; \
+ uint16x8_t __s1_515 = __p1_515; \
+ uint16x4_t __s2_515 = __p2_515; \
+ uint32x4_t __rev0_515; __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \
+ uint16x8_t __rev1_515; __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2_515; __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 3, 2, 1, 0); \
+ __ret_515 = __rev0_515 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_515), __noswap_splat_lane_u16(__rev2_515, __p3_515)); \
+ __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \
+ __ret_515; \
 })
 #endif
 
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
+ int64x2_t __ret_516; \
+ int64x2_t __s0_516 = __p0_516; \
+ int32x4_t __s1_516 = __p1_516; \
+ int32x2_t __s2_516 = __p2_516; \
+ __ret_516 = __s0_516 - vmull_s32(vget_high_s32(__s1_516), splat_lane_s32(__s2_516, __p3_516)); \
+ __ret_516; \
+})
+#else
+#define vmlsl_high_lane_s32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
+ int64x2_t __ret_517; \
+ int64x2_t __s0_517 = __p0_517; \
+ int32x4_t __s1_517 = __p1_517; \
+ int32x2_t __s2_517 = __p2_517; \
+ int64x2_t __rev0_517; __rev0_517 = __builtin_shufflevector(__s0_517, __s0_517, 1, 0); \
+ int32x4_t __rev1_517; __rev1_517 = __builtin_shufflevector(__s1_517, __s1_517, 3, 2, 1, 0); \
+ int32x2_t __rev2_517; __rev2_517 = __builtin_shufflevector(__s2_517, __s2_517, 1, 0); \
+ __ret_517 = __rev0_517 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_517), __noswap_splat_lane_s32(__rev2_517, __p3_517)); \
+ __ret_517 = __builtin_shufflevector(__ret_517, __ret_517, 1, 0); \
+ __ret_517; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_lane_s16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
+ int32x4_t __ret_518; \
+ int32x4_t __s0_518 = __p0_518; \
+ int16x8_t __s1_518 = __p1_518; \
+ int16x4_t __s2_518 = __p2_518; \
+ __ret_518 = __s0_518 - vmull_s16(vget_high_s16(__s1_518), splat_lane_s16(__s2_518, __p3_518)); \
+ __ret_518; \
+})
+#else
+#define vmlsl_high_lane_s16(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
+ int32x4_t __ret_519; \
+ int32x4_t __s0_519 = __p0_519; \
+ int16x8_t __s1_519 = __p1_519; \
+ int16x4_t __s2_519 = __p2_519; \
+ int32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__s0_519, __s0_519, 3, 2, 1, 0); \
+ int16x8_t __rev1_519; __rev1_519 = __builtin_shufflevector(__s1_519, __s1_519, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2_519; __rev2_519 = __builtin_shufflevector(__s2_519, __s2_519, 3, 2, 1, 0); \
+ __ret_519 = __rev0_519 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_519), __noswap_splat_lane_s16(__rev2_519, __p3_519)); \
+ __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 3, 2, 1, 0); \
+ __ret_519; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_high_laneq_u32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
+ uint64x2_t __ret_520; \
+ uint64x2_t __s0_520 = __p0_520; \
+ uint32x4_t __s1_520 = __p1_520; \
+ uint32x4_t __s2_520 = __p2_520; \
+ __ret_520 = __s0_520 - vmull_u32(vget_high_u32(__s1_520), splat_laneq_u32(__s2_520, __p3_520)); \
+ __ret_520; \
+})
+#else
+#define vmlsl_high_laneq_u32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
+ uint64x2_t __ret_521; \
+ uint64x2_t __s0_521 = __p0_521; \
+ uint32x4_t __s1_521 = __p1_521; \
+ uint32x4_t __s2_521 = __p2_521; \
+ uint64x2_t __rev0_521; __rev0_521 = __builtin_shufflevector(__s0_521, __s0_521, 1, 0); \
+ uint32x4_t __rev1_521; __rev1_521 = __builtin_shufflevector(__s1_521, __s1_521, 3, 2, 1, 0); \
+ uint32x4_t __rev2_521; __rev2_521 = __builtin_shufflevector(__s2_521, __s2_521,
3, 2, 1, 0); \ + __ret_521 = __rev0_521 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_521), __noswap_splat_laneq_u32(__rev2_521, __p3_521)); \ + __ret_521 = __builtin_shufflevector(__ret_521, __ret_521, 1, 0); \ + __ret_521; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u16(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \ + uint32x4_t __ret_522; \ + uint32x4_t __s0_522 = __p0_522; \ + uint16x8_t __s1_522 = __p1_522; \ + uint16x8_t __s2_522 = __p2_522; \ + __ret_522 = __s0_522 - vmull_u16(vget_high_u16(__s1_522), splat_laneq_u16(__s2_522, __p3_522)); \ + __ret_522; \ +}) +#else +#define vmlsl_high_laneq_u16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \ + uint32x4_t __ret_523; \ + uint32x4_t __s0_523 = __p0_523; \ + uint16x8_t __s1_523 = __p1_523; \ + uint16x8_t __s2_523 = __p2_523; \ + uint32x4_t __rev0_523; __rev0_523 = __builtin_shufflevector(__s0_523, __s0_523, 3, 2, 1, 0); \ + uint16x8_t __rev1_523; __rev1_523 = __builtin_shufflevector(__s1_523, __s1_523, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_523; __rev2_523 = __builtin_shufflevector(__s2_523, __s2_523, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_523 = __rev0_523 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_523), __noswap_splat_laneq_u16(__rev2_523, __p3_523)); \ + __ret_523 = __builtin_shufflevector(__ret_523, __ret_523, 3, 2, 1, 0); \ + __ret_523; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s32(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \ + int64x2_t __ret_524; \ + int64x2_t __s0_524 = __p0_524; \ + int32x4_t __s1_524 = __p1_524; \ + int32x4_t __s2_524 = __p2_524; \ + __ret_524 = __s0_524 - vmull_s32(vget_high_s32(__s1_524), splat_laneq_s32(__s2_524, __p3_524)); \ + __ret_524; \ +}) +#else +#define vmlsl_high_laneq_s32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \ + int64x2_t __ret_525; \ + int64x2_t __s0_525 = __p0_525; \ + int32x4_t __s1_525 = __p1_525; \ + int32x4_t __s2_525 = __p2_525; \ + int64x2_t __rev0_525; __rev0_525 = __builtin_shufflevector(__s0_525, __s0_525, 1, 0); \ + int32x4_t __rev1_525; __rev1_525 = __builtin_shufflevector(__s1_525, __s1_525, 3, 2, 1, 0); \ + int32x4_t __rev2_525; __rev2_525 = __builtin_shufflevector(__s2_525, __s2_525, 3, 2, 1, 0); \ + __ret_525 = __rev0_525 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_525), __noswap_splat_laneq_s32(__rev2_525, __p3_525)); \ + __ret_525 = __builtin_shufflevector(__ret_525, __ret_525, 1, 0); \ + __ret_525; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s16(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \ + int32x4_t __ret_526; \ + int32x4_t __s0_526 = __p0_526; \ + int16x8_t __s1_526 = __p1_526; \ + int16x8_t __s2_526 = __p2_526; \ + __ret_526 = __s0_526 - vmull_s16(vget_high_s16(__s1_526), splat_laneq_s16(__s2_526, __p3_526)); \ + __ret_526; \ +}) +#else +#define vmlsl_high_laneq_s16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \ + int32x4_t __ret_527; \ + int32x4_t __s0_527 = __p0_527; \ + int16x8_t __s1_527 = __p1_527; \ + int16x8_t __s2_527 = __p2_527; \ + int32x4_t __rev0_527; __rev0_527 = __builtin_shufflevector(__s0_527, __s0_527, 3, 2, 1, 0); \ + int16x8_t __rev1_527; __rev1_527 = __builtin_shufflevector(__s1_527, __s1_527, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_527; __rev2_527 = __builtin_shufflevector(__s2_527, __s2_527, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_527 = __rev0_527 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_527), __noswap_splat_laneq_s16(__rev2_527, __p3_527)); \ + __ret_527 = 
__builtin_shufflevector(__ret_527, __ret_527, 3, 2, 1, 0); \ + __ret_527; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u32(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \ + uint64x2_t __ret_528; \ + uint64x2_t __s0_528 = __p0_528; \ + uint32x2_t __s1_528 = __p1_528; \ + uint32x4_t __s2_528 = __p2_528; \ + __ret_528 = __s0_528 - vmull_u32(__s1_528, splat_laneq_u32(__s2_528, __p3_528)); \ + __ret_528; \ +}) +#else +#define vmlsl_laneq_u32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \ + uint64x2_t __ret_529; \ + uint64x2_t __s0_529 = __p0_529; \ + uint32x2_t __s1_529 = __p1_529; \ + uint32x4_t __s2_529 = __p2_529; \ + uint64x2_t __rev0_529; __rev0_529 = __builtin_shufflevector(__s0_529, __s0_529, 1, 0); \ + uint32x2_t __rev1_529; __rev1_529 = __builtin_shufflevector(__s1_529, __s1_529, 1, 0); \ + uint32x4_t __rev2_529; __rev2_529 = __builtin_shufflevector(__s2_529, __s2_529, 3, 2, 1, 0); \ + __ret_529 = __rev0_529 - __noswap_vmull_u32(__rev1_529, __noswap_splat_laneq_u32(__rev2_529, __p3_529)); \ + __ret_529 = __builtin_shufflevector(__ret_529, __ret_529, 1, 0); \ + __ret_529; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u16(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \ + uint32x4_t __ret_530; \ + uint32x4_t __s0_530 = __p0_530; \ + uint16x4_t __s1_530 = __p1_530; \ + uint16x8_t __s2_530 = __p2_530; \ + __ret_530 = __s0_530 - vmull_u16(__s1_530, splat_laneq_u16(__s2_530, __p3_530)); \ + __ret_530; \ +}) +#else +#define vmlsl_laneq_u16(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \ + uint32x4_t __ret_531; \ + uint32x4_t __s0_531 = __p0_531; \ + uint16x4_t __s1_531 = __p1_531; \ + uint16x8_t __s2_531 = __p2_531; \ + uint32x4_t __rev0_531; __rev0_531 = __builtin_shufflevector(__s0_531, __s0_531, 3, 2, 1, 0); \ + uint16x4_t __rev1_531; __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, 3, 2, 1, 0); \ + uint16x8_t __rev2_531; __rev2_531 = __builtin_shufflevector(__s2_531, __s2_531, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_531 = __rev0_531 - __noswap_vmull_u16(__rev1_531, __noswap_splat_laneq_u16(__rev2_531, __p3_531)); \ + __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); \ + __ret_531; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \ + int64x2_t __ret_532; \ + int64x2_t __s0_532 = __p0_532; \ + int32x2_t __s1_532 = __p1_532; \ + int32x4_t __s2_532 = __p2_532; \ + __ret_532 = __s0_532 - vmull_s32(__s1_532, splat_laneq_s32(__s2_532, __p3_532)); \ + __ret_532; \ +}) +#else +#define vmlsl_laneq_s32(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \ + int64x2_t __ret_533; \ + int64x2_t __s0_533 = __p0_533; \ + int32x2_t __s1_533 = __p1_533; \ + int32x4_t __s2_533 = __p2_533; \ + int64x2_t __rev0_533; __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, 1, 0); \ + int32x2_t __rev1_533; __rev1_533 = __builtin_shufflevector(__s1_533, __s1_533, 1, 0); \ + int32x4_t __rev2_533; __rev2_533 = __builtin_shufflevector(__s2_533, __s2_533, 3, 2, 1, 0); \ + __ret_533 = __rev0_533 - __noswap_vmull_s32(__rev1_533, __noswap_splat_laneq_s32(__rev2_533, __p3_533)); \ + __ret_533 = __builtin_shufflevector(__ret_533, __ret_533, 1, 0); \ + __ret_533; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \ + int32x4_t __ret_534; \ + int32x4_t __s0_534 = __p0_534; \ + int16x4_t __s1_534 = __p1_534; \ + int16x8_t __s2_534 = __p2_534; \ + __ret_534 = __s0_534 - 
vmull_s16(__s1_534, splat_laneq_s16(__s2_534, __p3_534)); \ + __ret_534; \ +}) +#else +#define vmlsl_laneq_s16(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \ + int32x4_t __ret_535; \ + int32x4_t __s0_535 = __p0_535; \ + int16x4_t __s1_535 = __p1_535; \ + int16x8_t __s2_535 = __p2_535; \ + int32x4_t __rev0_535; __rev0_535 = __builtin_shufflevector(__s0_535, __s0_535, 3, 2, 1, 0); \ + int16x4_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 3, 2, 1, 0); \ + int16x8_t __rev2_535; __rev2_535 = __builtin_shufflevector(__s2_535, __s2_535, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_535 = __rev0_535 - __noswap_vmull_s16(__rev1_535, __noswap_splat_laneq_s16(__rev2_535, __p3_535)); \ + __ret_535 = __builtin_shufflevector(__ret_535, __ret_535, 3, 2, 1, 0); \ + __ret_535; \ +}) +#endif + +__ai __attribute__((target("neon"))) poly64x1_t vmov_n_p64(poly64_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; return __ret; } #else -__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -50217,13 +51963,13 @@ __ai poly64x2_t vmovq_n_p64(poly64_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmovq_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; return __ret; } #else -__ai float64x2_t vmovq_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) { float64x2_t __ret; __ret = (float64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); @@ -50231,163 +51977,163 @@ __ai float64x2_t vmovq_n_f64(float64_t __p0) { } #endif -__ai float64x1_t vmov_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vmov_n_f64(float64_t __p0) { float64x1_t __ret; __ret = (float64x1_t) {__p0}; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_515) { - uint16x8_t __ret_515; - uint8x8_t __a1_515 = vget_high_u8(__p0_515); - __ret_515 = (uint16x8_t)(vshll_n_u8(__a1_515, 0)); - return __ret_515; +__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_536) { + uint16x8_t __ret_536; + uint8x8_t __a1_536 = vget_high_u8(__p0_536); + __ret_536 = (uint16x8_t)(vshll_n_u8(__a1_536, 0)); + return __ret_536; } #else -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_516) { - uint16x8_t __ret_516; - uint8x16_t __rev0_516; __rev0_516 = __builtin_shufflevector(__p0_516, __p0_516, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __a1_516 = __noswap_vget_high_u8(__rev0_516); - __ret_516 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_516, 0)); - __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_516; +__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_537) { + uint16x8_t __ret_537; + uint8x16_t __rev0_537; __rev0_537 = __builtin_shufflevector(__p0_537, __p0_537, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __a1_537 = __noswap_vget_high_u8(__rev0_537); + __ret_537 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_537, 0)); + __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 
7, 6, 5, 4, 3, 2, 1, 0); + return __ret_537; } -__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_517) { - uint16x8_t __ret_517; - uint8x8_t __a1_517 = __noswap_vget_high_u8(__p0_517); - __ret_517 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_517, 0)); - return __ret_517; +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_538) { + uint16x8_t __ret_538; + uint8x8_t __a1_538 = __noswap_vget_high_u8(__p0_538); + __ret_538 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_538, 0)); + return __ret_538; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_518) { - uint64x2_t __ret_518; - uint32x2_t __a1_518 = vget_high_u32(__p0_518); - __ret_518 = (uint64x2_t)(vshll_n_u32(__a1_518, 0)); - return __ret_518; +__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_539) { + uint64x2_t __ret_539; + uint32x2_t __a1_539 = vget_high_u32(__p0_539); + __ret_539 = (uint64x2_t)(vshll_n_u32(__a1_539, 0)); + return __ret_539; } #else -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_519) { - uint64x2_t __ret_519; - uint32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__p0_519, __p0_519, 3, 2, 1, 0); - uint32x2_t __a1_519 = __noswap_vget_high_u32(__rev0_519); - __ret_519 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_519, 0)); - __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 1, 0); - return __ret_519; +__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_540) { + uint64x2_t __ret_540; + uint32x4_t __rev0_540; __rev0_540 = __builtin_shufflevector(__p0_540, __p0_540, 3, 2, 1, 0); + uint32x2_t __a1_540 = __noswap_vget_high_u32(__rev0_540); + __ret_540 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_540, 0)); + __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); + return __ret_540; } -__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_520) { - uint64x2_t __ret_520; - uint32x2_t __a1_520 = __noswap_vget_high_u32(__p0_520); - __ret_520 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_520, 0)); - return __ret_520; +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_541) { + uint64x2_t __ret_541; + uint32x2_t __a1_541 = __noswap_vget_high_u32(__p0_541); + __ret_541 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_541, 0)); + return __ret_541; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_521) { - uint32x4_t __ret_521; - uint16x4_t __a1_521 = vget_high_u16(__p0_521); - __ret_521 = (uint32x4_t)(vshll_n_u16(__a1_521, 0)); - return __ret_521; +__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_542) { + uint32x4_t __ret_542; + uint16x4_t __a1_542 = vget_high_u16(__p0_542); + __ret_542 = (uint32x4_t)(vshll_n_u16(__a1_542, 0)); + return __ret_542; } #else -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_522) { - uint32x4_t __ret_522; - uint16x8_t __rev0_522; __rev0_522 = __builtin_shufflevector(__p0_522, __p0_522, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __a1_522 = __noswap_vget_high_u16(__rev0_522); - __ret_522 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_522, 0)); - __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); - return __ret_522; +__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_543) { + uint32x4_t __ret_543; + uint16x8_t __rev0_543; __rev0_543 = __builtin_shufflevector(__p0_543, __p0_543, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __a1_543 = __noswap_vget_high_u16(__rev0_543); + __ret_543 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_543, 0)); + __ret_543 = 
__builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); + return __ret_543; } -__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_523) { - uint32x4_t __ret_523; - uint16x4_t __a1_523 = __noswap_vget_high_u16(__p0_523); - __ret_523 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_523, 0)); - return __ret_523; +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_544) { + uint32x4_t __ret_544; + uint16x4_t __a1_544 = __noswap_vget_high_u16(__p0_544); + __ret_544 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_544, 0)); + return __ret_544; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_524) { - int16x8_t __ret_524; - int8x8_t __a1_524 = vget_high_s8(__p0_524); - __ret_524 = (int16x8_t)(vshll_n_s8(__a1_524, 0)); - return __ret_524; +__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_545) { + int16x8_t __ret_545; + int8x8_t __a1_545 = vget_high_s8(__p0_545); + __ret_545 = (int16x8_t)(vshll_n_s8(__a1_545, 0)); + return __ret_545; } #else -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_525) { - int16x8_t __ret_525; - int8x16_t __rev0_525; __rev0_525 = __builtin_shufflevector(__p0_525, __p0_525, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __a1_525 = __noswap_vget_high_s8(__rev0_525); - __ret_525 = (int16x8_t)(__noswap_vshll_n_s8(__a1_525, 0)); - __ret_525 = __builtin_shufflevector(__ret_525, __ret_525, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_525; +__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_546) { + int16x8_t __ret_546; + int8x16_t __rev0_546; __rev0_546 = __builtin_shufflevector(__p0_546, __p0_546, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __a1_546 = __noswap_vget_high_s8(__rev0_546); + __ret_546 = (int16x8_t)(__noswap_vshll_n_s8(__a1_546, 0)); + __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_546; } -__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_526) { - int16x8_t __ret_526; - int8x8_t __a1_526 = __noswap_vget_high_s8(__p0_526); - __ret_526 = (int16x8_t)(__noswap_vshll_n_s8(__a1_526, 0)); - return __ret_526; +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_547) { + int16x8_t __ret_547; + int8x8_t __a1_547 = __noswap_vget_high_s8(__p0_547); + __ret_547 = (int16x8_t)(__noswap_vshll_n_s8(__a1_547, 0)); + return __ret_547; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_527) { - int64x2_t __ret_527; - int32x2_t __a1_527 = vget_high_s32(__p0_527); - __ret_527 = (int64x2_t)(vshll_n_s32(__a1_527, 0)); - return __ret_527; +__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_548) { + int64x2_t __ret_548; + int32x2_t __a1_548 = vget_high_s32(__p0_548); + __ret_548 = (int64x2_t)(vshll_n_s32(__a1_548, 0)); + return __ret_548; } #else -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_528) { - int64x2_t __ret_528; - int32x4_t __rev0_528; __rev0_528 = __builtin_shufflevector(__p0_528, __p0_528, 3, 2, 1, 0); - int32x2_t __a1_528 = __noswap_vget_high_s32(__rev0_528); - __ret_528 = (int64x2_t)(__noswap_vshll_n_s32(__a1_528, 0)); - __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 1, 0); - return __ret_528; +__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_549) { + int64x2_t __ret_549; + int32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__p0_549, __p0_549, 3, 2, 1, 0); + int32x2_t __a1_549 = __noswap_vget_high_s32(__rev0_549); + __ret_549 = 
(int64x2_t)(__noswap_vshll_n_s32(__a1_549, 0)); + __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 1, 0); + return __ret_549; } -__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_529) { - int64x2_t __ret_529; - int32x2_t __a1_529 = __noswap_vget_high_s32(__p0_529); - __ret_529 = (int64x2_t)(__noswap_vshll_n_s32(__a1_529, 0)); - return __ret_529; +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_550) { + int64x2_t __ret_550; + int32x2_t __a1_550 = __noswap_vget_high_s32(__p0_550); + __ret_550 = (int64x2_t)(__noswap_vshll_n_s32(__a1_550, 0)); + return __ret_550; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_530) { - int32x4_t __ret_530; - int16x4_t __a1_530 = vget_high_s16(__p0_530); - __ret_530 = (int32x4_t)(vshll_n_s16(__a1_530, 0)); - return __ret_530; +__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_551) { + int32x4_t __ret_551; + int16x4_t __a1_551 = vget_high_s16(__p0_551); + __ret_551 = (int32x4_t)(vshll_n_s16(__a1_551, 0)); + return __ret_551; } #else -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_531) { - int32x4_t __ret_531; - int16x8_t __rev0_531; __rev0_531 = __builtin_shufflevector(__p0_531, __p0_531, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __a1_531 = __noswap_vget_high_s16(__rev0_531); - __ret_531 = (int32x4_t)(__noswap_vshll_n_s16(__a1_531, 0)); - __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); - return __ret_531; +__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_552) { + int32x4_t __ret_552; + int16x8_t __rev0_552; __rev0_552 = __builtin_shufflevector(__p0_552, __p0_552, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __a1_552 = __noswap_vget_high_s16(__rev0_552); + __ret_552 = (int32x4_t)(__noswap_vshll_n_s16(__a1_552, 0)); + __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 3, 2, 1, 0); + return __ret_552; } -__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_532) { - int32x4_t __ret_532; - int16x4_t __a1_532 = __noswap_vget_high_s16(__p0_532); - __ret_532 = (int32x4_t)(__noswap_vshll_n_s16(__a1_532, 0)); - return __ret_532; +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_553) { + int32x4_t __ret_553; + int16x4_t __a1_553 = __noswap_vget_high_s16(__p0_553); + __ret_553 = (int32x4_t)(__noswap_vshll_n_s16(__a1_553, 0)); + return __ret_553; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vmovn_u32(__p1)); return __ret; } #else -__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -50398,13 +52144,13 @@ __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vmovn_u64(__p1)); return __ret; } #else -__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, 
uint64x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -50415,13 +52161,13 @@ __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vmovn_u16(__p1)); return __ret; } #else -__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50432,13 +52178,13 @@ __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; __ret = vcombine_s16(__p0, vmovn_s32(__p1)); return __ret; } #else -__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -50449,13 +52195,13 @@ __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; __ret = vcombine_s32(__p0, vmovn_s64(__p1)); return __ret; } #else -__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -50466,13 +52212,13 @@ __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; __ret = vcombine_s8(__p0, vmovn_s16(__p1)); return __ret; } #else -__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50483,13 +52229,13 @@ __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 * __p1; return __ret; } #else -__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; 
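 /* Big-endian path: each operand is lane-reversed with __builtin_shufflevector
    into the little-endian layout the arithmetic below assumes, and the result
    is reversed back before returning, so callers observe the same lane order
    on either endianness. The __noswap_* helpers used elsewhere in this header
    apply the same operations to operands that are already reversed. */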
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -50499,34 +52245,34 @@ __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 * __p1; return __ret; } -#define vmuld_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \ - float64_t __ret_533; \ - float64_t __s0_533 = __p0_533; \ - float64x1_t __s1_533 = __p1_533; \ - __ret_533 = __s0_533 * vget_lane_f64(__s1_533, __p2_533); \ - __ret_533; \ +#define vmuld_lane_f64(__p0_554, __p1_554, __p2_554) __extension__ ({ \ + float64_t __ret_554; \ + float64_t __s0_554 = __p0_554; \ + float64x1_t __s1_554 = __p1_554; \ + __ret_554 = __s0_554 * vget_lane_f64(__s1_554, __p2_554); \ + __ret_554; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmuls_lane_f32(__p0_534, __p1_534, __p2_534) __extension__ ({ \ - float32_t __ret_534; \ - float32_t __s0_534 = __p0_534; \ - float32x2_t __s1_534 = __p1_534; \ - __ret_534 = __s0_534 * vget_lane_f32(__s1_534, __p2_534); \ - __ret_534; \ +#define vmuls_lane_f32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ + float32_t __ret_555; \ + float32_t __s0_555 = __p0_555; \ + float32x2_t __s1_555 = __p1_555; \ + __ret_555 = __s0_555 * vget_lane_f32(__s1_555, __p2_555); \ + __ret_555; \ }) #else -#define vmuls_lane_f32(__p0_535, __p1_535, __p2_535) __extension__ ({ \ - float32_t __ret_535; \ - float32_t __s0_535 = __p0_535; \ - float32x2_t __s1_535 = __p1_535; \ - float32x2_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \ - __ret_535 = __s0_535 * __noswap_vget_lane_f32(__rev1_535, __p2_535); \ - __ret_535; \ +#define vmuls_lane_f32(__p0_556, __p1_556, __p2_556) __extension__ ({ \ + float32_t __ret_556; \ + float32_t __s0_556 = __p0_556; \ + float32x2_t __s1_556 = __p1_556; \ + float32x2_t __rev1_556; __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \ + __ret_556 = __s0_556 * __noswap_vget_lane_f32(__rev1_556, __p2_556); \ + __ret_556; \ }) #endif @@ -50538,60 +52284,60 @@ __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f64(__p0_536, __p1_536, __p2_536) __extension__ ({ \ - float64x2_t __ret_536; \ - float64x2_t __s0_536 = __p0_536; \ - float64x1_t __s1_536 = __p1_536; \ - __ret_536 = __s0_536 * splatq_lane_f64(__s1_536, __p2_536); \ - __ret_536; \ +#define vmulq_lane_f64(__p0_557, __p1_557, __p2_557) __extension__ ({ \ + float64x2_t __ret_557; \ + float64x2_t __s0_557 = __p0_557; \ + float64x1_t __s1_557 = __p1_557; \ + __ret_557 = __s0_557 * splatq_lane_f64(__s1_557, __p2_557); \ + __ret_557; \ }) #else -#define vmulq_lane_f64(__p0_537, __p1_537, __p2_537) __extension__ ({ \ - float64x2_t __ret_537; \ - float64x2_t __s0_537 = __p0_537; \ - float64x1_t __s1_537 = __p1_537; \ - float64x2_t __rev0_537; __rev0_537 = __builtin_shufflevector(__s0_537, __s0_537, 1, 0); \ - __ret_537 = __rev0_537 * __noswap_splatq_lane_f64(__s1_537, __p2_537); \ - __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 1, 0); \ - __ret_537; \ +#define vmulq_lane_f64(__p0_558, __p1_558, __p2_558) __extension__ ({ \ + float64x2_t __ret_558; \ + float64x2_t __s0_558 = __p0_558; \ + float64x1_t __s1_558 = __p1_558; \ + float64x2_t __rev0_558; __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 1, 0); \ + __ret_558 = __rev0_558 
* __noswap_splatq_lane_f64(__s1_558, __p2_558); \ + __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 1, 0); \ + __ret_558; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmuld_laneq_f64(__p0_538, __p1_538, __p2_538) __extension__ ({ \ - float64_t __ret_538; \ - float64_t __s0_538 = __p0_538; \ - float64x2_t __s1_538 = __p1_538; \ - __ret_538 = __s0_538 * vgetq_lane_f64(__s1_538, __p2_538); \ - __ret_538; \ +#define vmuld_laneq_f64(__p0_559, __p1_559, __p2_559) __extension__ ({ \ + float64_t __ret_559; \ + float64_t __s0_559 = __p0_559; \ + float64x2_t __s1_559 = __p1_559; \ + __ret_559 = __s0_559 * vgetq_lane_f64(__s1_559, __p2_559); \ + __ret_559; \ }) #else -#define vmuld_laneq_f64(__p0_539, __p1_539, __p2_539) __extension__ ({ \ - float64_t __ret_539; \ - float64_t __s0_539 = __p0_539; \ - float64x2_t __s1_539 = __p1_539; \ - float64x2_t __rev1_539; __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 1, 0); \ - __ret_539 = __s0_539 * __noswap_vgetq_lane_f64(__rev1_539, __p2_539); \ - __ret_539; \ +#define vmuld_laneq_f64(__p0_560, __p1_560, __p2_560) __extension__ ({ \ + float64_t __ret_560; \ + float64_t __s0_560 = __p0_560; \ + float64x2_t __s1_560 = __p1_560; \ + float64x2_t __rev1_560; __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 1, 0); \ + __ret_560 = __s0_560 * __noswap_vgetq_lane_f64(__rev1_560, __p2_560); \ + __ret_560; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmuls_laneq_f32(__p0_540, __p1_540, __p2_540) __extension__ ({ \ - float32_t __ret_540; \ - float32_t __s0_540 = __p0_540; \ - float32x4_t __s1_540 = __p1_540; \ - __ret_540 = __s0_540 * vgetq_lane_f32(__s1_540, __p2_540); \ - __ret_540; \ +#define vmuls_laneq_f32(__p0_561, __p1_561, __p2_561) __extension__ ({ \ + float32_t __ret_561; \ + float32_t __s0_561 = __p0_561; \ + float32x4_t __s1_561 = __p1_561; \ + __ret_561 = __s0_561 * vgetq_lane_f32(__s1_561, __p2_561); \ + __ret_561; \ }) #else -#define vmuls_laneq_f32(__p0_541, __p1_541, __p2_541) __extension__ ({ \ - float32_t __ret_541; \ - float32_t __s0_541 = __p0_541; \ - float32x4_t __s1_541 = __p1_541; \ - float32x4_t __rev1_541; __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 3, 2, 1, 0); \ - __ret_541 = __s0_541 * __noswap_vgetq_lane_f32(__rev1_541, __p2_541); \ - __ret_541; \ +#define vmuls_laneq_f32(__p0_562, __p1_562, __p2_562) __extension__ ({ \ + float32_t __ret_562; \ + float32_t __s0_562 = __p0_562; \ + float32x4_t __s1_562 = __p1_562; \ + float32x4_t __rev1_562; __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 3, 2, 1, 0); \ + __ret_562 = __s0_562 * __noswap_vgetq_lane_f32(__rev1_562, __p2_562); \ + __ret_562; \ }) #endif @@ -50615,249 +52361,249 @@ __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u32(__p0_542, __p1_542, __p2_542) __extension__ ({ \ - uint32x4_t __ret_542; \ - uint32x4_t __s0_542 = __p0_542; \ - uint32x4_t __s1_542 = __p1_542; \ - __ret_542 = __s0_542 * splatq_laneq_u32(__s1_542, __p2_542); \ - __ret_542; \ -}) -#else -#define vmulq_laneq_u32(__p0_543, __p1_543, __p2_543) __extension__ ({ \ - uint32x4_t __ret_543; \ - uint32x4_t __s0_543 = __p0_543; \ - uint32x4_t __s1_543 = __p1_543; \ - uint32x4_t __rev0_543; __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 3, 2, 1, 0); \ - uint32x4_t __rev1_543; __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 3, 2, 1, 0); \ - __ret_543 = __rev0_543 * __noswap_splatq_laneq_u32(__rev1_543, __p2_543); \ - __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); \ 
- __ret_543; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u16(__p0_544, __p1_544, __p2_544) __extension__ ({ \ - uint16x8_t __ret_544; \ - uint16x8_t __s0_544 = __p0_544; \ - uint16x8_t __s1_544 = __p1_544; \ - __ret_544 = __s0_544 * splatq_laneq_u16(__s1_544, __p2_544); \ - __ret_544; \ -}) -#else -#define vmulq_laneq_u16(__p0_545, __p1_545, __p2_545) __extension__ ({ \ - uint16x8_t __ret_545; \ - uint16x8_t __s0_545 = __p0_545; \ - uint16x8_t __s1_545 = __p1_545; \ - uint16x8_t __rev0_545; __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_545; __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_545 = __rev0_545 * __noswap_splatq_laneq_u16(__rev1_545, __p2_545); \ - __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_545; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f64(__p0_546, __p1_546, __p2_546) __extension__ ({ \ - float64x2_t __ret_546; \ - float64x2_t __s0_546 = __p0_546; \ - float64x2_t __s1_546 = __p1_546; \ - __ret_546 = __s0_546 * splatq_laneq_f64(__s1_546, __p2_546); \ - __ret_546; \ -}) -#else -#define vmulq_laneq_f64(__p0_547, __p1_547, __p2_547) __extension__ ({ \ - float64x2_t __ret_547; \ - float64x2_t __s0_547 = __p0_547; \ - float64x2_t __s1_547 = __p1_547; \ - float64x2_t __rev0_547; __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 1, 0); \ - float64x2_t __rev1_547; __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 1, 0); \ - __ret_547 = __rev0_547 * __noswap_splatq_laneq_f64(__rev1_547, __p2_547); \ - __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 1, 0); \ - __ret_547; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f32(__p0_548, __p1_548, __p2_548) __extension__ ({ \ - float32x4_t __ret_548; \ - float32x4_t __s0_548 = __p0_548; \ - float32x4_t __s1_548 = __p1_548; \ - __ret_548 = __s0_548 * splatq_laneq_f32(__s1_548, __p2_548); \ - __ret_548; \ -}) -#else -#define vmulq_laneq_f32(__p0_549, __p1_549, __p2_549) __extension__ ({ \ - float32x4_t __ret_549; \ - float32x4_t __s0_549 = __p0_549; \ - float32x4_t __s1_549 = __p1_549; \ - float32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 3, 2, 1, 0); \ - float32x4_t __rev1_549; __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 3, 2, 1, 0); \ - __ret_549 = __rev0_549 * __noswap_splatq_laneq_f32(__rev1_549, __p2_549); \ - __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 3, 2, 1, 0); \ - __ret_549; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s32(__p0_550, __p1_550, __p2_550) __extension__ ({ \ - int32x4_t __ret_550; \ - int32x4_t __s0_550 = __p0_550; \ - int32x4_t __s1_550 = __p1_550; \ - __ret_550 = __s0_550 * splatq_laneq_s32(__s1_550, __p2_550); \ - __ret_550; \ -}) -#else -#define vmulq_laneq_s32(__p0_551, __p1_551, __p2_551) __extension__ ({ \ - int32x4_t __ret_551; \ - int32x4_t __s0_551 = __p0_551; \ - int32x4_t __s1_551 = __p1_551; \ - int32x4_t __rev0_551; __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 3, 2, 1, 0); \ - int32x4_t __rev1_551; __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \ - __ret_551 = __rev0_551 * __noswap_splatq_laneq_s32(__rev1_551, __p2_551); \ - __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 3, 2, 1, 0); \ - __ret_551; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s16(__p0_552, __p1_552, __p2_552) __extension__ ({ \ - int16x8_t __ret_552; \ - int16x8_t __s0_552 = 
__p0_552; \ - int16x8_t __s1_552 = __p1_552; \ - __ret_552 = __s0_552 * splatq_laneq_s16(__s1_552, __p2_552); \ - __ret_552; \ -}) -#else -#define vmulq_laneq_s16(__p0_553, __p1_553, __p2_553) __extension__ ({ \ - int16x8_t __ret_553; \ - int16x8_t __s0_553 = __p0_553; \ - int16x8_t __s1_553 = __p1_553; \ - int16x8_t __rev0_553; __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_553; __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_553 = __rev0_553 * __noswap_splatq_laneq_s16(__rev1_553, __p2_553); \ - __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_553; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u32(__p0_554, __p1_554, __p2_554) __extension__ ({ \ - uint32x2_t __ret_554; \ - uint32x2_t __s0_554 = __p0_554; \ - uint32x4_t __s1_554 = __p1_554; \ - __ret_554 = __s0_554 * splat_laneq_u32(__s1_554, __p2_554); \ - __ret_554; \ -}) -#else -#define vmul_laneq_u32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ - uint32x2_t __ret_555; \ - uint32x2_t __s0_555 = __p0_555; \ - uint32x4_t __s1_555 = __p1_555; \ - uint32x2_t __rev0_555; __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \ - uint32x4_t __rev1_555; __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \ - __ret_555 = __rev0_555 * __noswap_splat_laneq_u32(__rev1_555, __p2_555); \ - __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \ - __ret_555; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u16(__p0_556, __p1_556, __p2_556) __extension__ ({ \ - uint16x4_t __ret_556; \ - uint16x4_t __s0_556 = __p0_556; \ - uint16x8_t __s1_556 = __p1_556; \ - __ret_556 = __s0_556 * splat_laneq_u16(__s1_556, __p2_556); \ - __ret_556; \ -}) -#else -#define vmul_laneq_u16(__p0_557, __p1_557, __p2_557) __extension__ ({ \ - uint16x4_t __ret_557; \ - uint16x4_t __s0_557 = __p0_557; \ - uint16x8_t __s1_557 = __p1_557; \ - uint16x4_t __rev0_557; __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 3, 2, 1, 0); \ - uint16x8_t __rev1_557; __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_557 = __rev0_557 * __noswap_splat_laneq_u16(__rev1_557, __p2_557); \ - __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 3, 2, 1, 0); \ - __ret_557; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f32(__p0_558, __p1_558, __p2_558) __extension__ ({ \ - float32x2_t __ret_558; \ - float32x2_t __s0_558 = __p0_558; \ - float32x4_t __s1_558 = __p1_558; \ - __ret_558 = __s0_558 * splat_laneq_f32(__s1_558, __p2_558); \ - __ret_558; \ -}) -#else -#define vmul_laneq_f32(__p0_559, __p1_559, __p2_559) __extension__ ({ \ - float32x2_t __ret_559; \ - float32x2_t __s0_559 = __p0_559; \ - float32x4_t __s1_559 = __p1_559; \ - float32x2_t __rev0_559; __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 1, 0); \ - float32x4_t __rev1_559; __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 3, 2, 1, 0); \ - __ret_559 = __rev0_559 * __noswap_splat_laneq_f32(__rev1_559, __p2_559); \ - __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 1, 0); \ - __ret_559; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s32(__p0_560, __p1_560, __p2_560) __extension__ ({ \ - int32x2_t __ret_560; \ - int32x2_t __s0_560 = __p0_560; \ - int32x4_t __s1_560 = __p1_560; \ - __ret_560 = __s0_560 * splat_laneq_s32(__s1_560, __p2_560); \ - __ret_560; \ -}) -#else -#define vmul_laneq_s32(__p0_561, __p1_561, __p2_561) 
__extension__ ({ \ - int32x2_t __ret_561; \ - int32x2_t __s0_561 = __p0_561; \ - int32x4_t __s1_561 = __p1_561; \ - int32x2_t __rev0_561; __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 1, 0); \ - int32x4_t __rev1_561; __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 3, 2, 1, 0); \ - __ret_561 = __rev0_561 * __noswap_splat_laneq_s32(__rev1_561, __p2_561); \ - __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \ - __ret_561; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s16(__p0_562, __p1_562, __p2_562) __extension__ ({ \ - int16x4_t __ret_562; \ - int16x4_t __s0_562 = __p0_562; \ - int16x8_t __s1_562 = __p1_562; \ - __ret_562 = __s0_562 * splat_laneq_s16(__s1_562, __p2_562); \ - __ret_562; \ -}) -#else -#define vmul_laneq_s16(__p0_563, __p1_563, __p2_563) __extension__ ({ \ - int16x4_t __ret_563; \ - int16x4_t __s0_563 = __p0_563; \ - int16x8_t __s1_563 = __p1_563; \ - int16x4_t __rev0_563; __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 3, 2, 1, 0); \ - int16x8_t __rev1_563; __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_563 = __rev0_563 * __noswap_splat_laneq_s16(__rev1_563, __p2_563); \ - __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \ +#define vmulq_laneq_u32(__p0_563, __p1_563, __p2_563) __extension__ ({ \ + uint32x4_t __ret_563; \ + uint32x4_t __s0_563 = __p0_563; \ + uint32x4_t __s1_563 = __p1_563; \ + __ret_563 = __s0_563 * splatq_laneq_u32(__s1_563, __p2_563); \ __ret_563; \ }) +#else +#define vmulq_laneq_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ + uint32x4_t __ret_564; \ + uint32x4_t __s0_564 = __p0_564; \ + uint32x4_t __s1_564 = __p1_564; \ + uint32x4_t __rev0_564; __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \ + uint32x4_t __rev1_564; __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \ + __ret_564 = __rev0_564 * __noswap_splatq_laneq_u32(__rev1_564, __p2_564); \ + __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \ + __ret_564; \ +}) #endif -__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u16(__p0_565, __p1_565, __p2_565) __extension__ ({ \ + uint16x8_t __ret_565; \ + uint16x8_t __s0_565 = __p0_565; \ + uint16x8_t __s1_565 = __p1_565; \ + __ret_565 = __s0_565 * splatq_laneq_u16(__s1_565, __p2_565); \ + __ret_565; \ +}) +#else +#define vmulq_laneq_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ + uint16x8_t __ret_566; \ + uint16x8_t __s0_566 = __p0_566; \ + uint16x8_t __s1_566 = __p1_566; \ + uint16x8_t __rev0_566; __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_566; __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_566 = __rev0_566 * __noswap_splatq_laneq_u16(__rev1_566, __p2_566); \ + __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_566; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f64(__p0_567, __p1_567, __p2_567) __extension__ ({ \ + float64x2_t __ret_567; \ + float64x2_t __s0_567 = __p0_567; \ + float64x2_t __s1_567 = __p1_567; \ + __ret_567 = __s0_567 * splatq_laneq_f64(__s1_567, __p2_567); \ + __ret_567; \ +}) +#else +#define vmulq_laneq_f64(__p0_568, __p1_568, __p2_568) __extension__ ({ \ + float64x2_t __ret_568; \ + float64x2_t __s0_568 = __p0_568; \ + float64x2_t __s1_568 = __p1_568; \ + float64x2_t __rev0_568; __rev0_568 = 
__builtin_shufflevector(__s0_568, __s0_568, 1, 0); \ + float64x2_t __rev1_568; __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \ + __ret_568 = __rev0_568 * __noswap_splatq_laneq_f64(__rev1_568, __p2_568); \ + __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \ + __ret_568; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ + float32x4_t __ret_569; \ + float32x4_t __s0_569 = __p0_569; \ + float32x4_t __s1_569 = __p1_569; \ + __ret_569 = __s0_569 * splatq_laneq_f32(__s1_569, __p2_569); \ + __ret_569; \ +}) +#else +#define vmulq_laneq_f32(__p0_570, __p1_570, __p2_570) __extension__ ({ \ + float32x4_t __ret_570; \ + float32x4_t __s0_570 = __p0_570; \ + float32x4_t __s1_570 = __p1_570; \ + float32x4_t __rev0_570; __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \ + float32x4_t __rev1_570; __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \ + __ret_570 = __rev0_570 * __noswap_splatq_laneq_f32(__rev1_570, __p2_570); \ + __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \ + __ret_570; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s32(__p0_571, __p1_571, __p2_571) __extension__ ({ \ + int32x4_t __ret_571; \ + int32x4_t __s0_571 = __p0_571; \ + int32x4_t __s1_571 = __p1_571; \ + __ret_571 = __s0_571 * splatq_laneq_s32(__s1_571, __p2_571); \ + __ret_571; \ +}) +#else +#define vmulq_laneq_s32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ + int32x4_t __ret_572; \ + int32x4_t __s0_572 = __p0_572; \ + int32x4_t __s1_572 = __p1_572; \ + int32x4_t __rev0_572; __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \ + int32x4_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \ + __ret_572 = __rev0_572 * __noswap_splatq_laneq_s32(__rev1_572, __p2_572); \ + __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \ + __ret_572; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s16(__p0_573, __p1_573, __p2_573) __extension__ ({ \ + int16x8_t __ret_573; \ + int16x8_t __s0_573 = __p0_573; \ + int16x8_t __s1_573 = __p1_573; \ + __ret_573 = __s0_573 * splatq_laneq_s16(__s1_573, __p2_573); \ + __ret_573; \ +}) +#else +#define vmulq_laneq_s16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ + int16x8_t __ret_574; \ + int16x8_t __s0_574 = __p0_574; \ + int16x8_t __s1_574 = __p1_574; \ + int16x8_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_574; __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_574 = __rev0_574 * __noswap_splatq_laneq_s16(__rev1_574, __p2_574); \ + __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_574; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u32(__p0_575, __p1_575, __p2_575) __extension__ ({ \ + uint32x2_t __ret_575; \ + uint32x2_t __s0_575 = __p0_575; \ + uint32x4_t __s1_575 = __p1_575; \ + __ret_575 = __s0_575 * splat_laneq_u32(__s1_575, __p2_575); \ + __ret_575; \ +}) +#else +#define vmul_laneq_u32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ + uint32x2_t __ret_576; \ + uint32x2_t __s0_576 = __p0_576; \ + uint32x4_t __s1_576 = __p1_576; \ + uint32x2_t __rev0_576; __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \ + uint32x4_t __rev1_576; __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \ + __ret_576 = __rev0_576 * 
__noswap_splat_laneq_u32(__rev1_576, __p2_576); \ + __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \ + __ret_576; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u16(__p0_577, __p1_577, __p2_577) __extension__ ({ \ + uint16x4_t __ret_577; \ + uint16x4_t __s0_577 = __p0_577; \ + uint16x8_t __s1_577 = __p1_577; \ + __ret_577 = __s0_577 * splat_laneq_u16(__s1_577, __p2_577); \ + __ret_577; \ +}) +#else +#define vmul_laneq_u16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ + uint16x4_t __ret_578; \ + uint16x4_t __s0_578 = __p0_578; \ + uint16x8_t __s1_578 = __p1_578; \ + uint16x4_t __rev0_578; __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \ + uint16x8_t __rev1_578; __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_578 = __rev0_578 * __noswap_splat_laneq_u16(__rev1_578, __p2_578); \ + __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \ + __ret_578; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f32(__p0_579, __p1_579, __p2_579) __extension__ ({ \ + float32x2_t __ret_579; \ + float32x2_t __s0_579 = __p0_579; \ + float32x4_t __s1_579 = __p1_579; \ + __ret_579 = __s0_579 * splat_laneq_f32(__s1_579, __p2_579); \ + __ret_579; \ +}) +#else +#define vmul_laneq_f32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ + float32x2_t __ret_580; \ + float32x2_t __s0_580 = __p0_580; \ + float32x4_t __s1_580 = __p1_580; \ + float32x2_t __rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \ + float32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \ + __ret_580 = __rev0_580 * __noswap_splat_laneq_f32(__rev1_580, __p2_580); \ + __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \ + __ret_580; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ + int32x2_t __ret_581; \ + int32x2_t __s0_581 = __p0_581; \ + int32x4_t __s1_581 = __p1_581; \ + __ret_581 = __s0_581 * splat_laneq_s32(__s1_581, __p2_581); \ + __ret_581; \ +}) +#else +#define vmul_laneq_s32(__p0_582, __p1_582, __p2_582) __extension__ ({ \ + int32x2_t __ret_582; \ + int32x2_t __s0_582 = __p0_582; \ + int32x4_t __s1_582 = __p1_582; \ + int32x2_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 1, 0); \ + int32x4_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \ + __ret_582 = __rev0_582 * __noswap_splat_laneq_s32(__rev1_582, __p2_582); \ + __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 1, 0); \ + __ret_582; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ + int16x4_t __ret_583; \ + int16x4_t __s0_583 = __p0_583; \ + int16x8_t __s1_583 = __p1_583; \ + __ret_583 = __s0_583 * splat_laneq_s16(__s1_583, __p2_583); \ + __ret_583; \ +}) +#else +#define vmul_laneq_s16(__p0_584, __p1_584, __p2_584) __extension__ ({ \ + int16x4_t __ret_584; \ + int16x4_t __s0_584 = __p0_584; \ + int16x8_t __s1_584 = __p1_584; \ + int16x4_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 3, 2, 1, 0); \ + int16x8_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_584 = __rev0_584 * __noswap_splat_laneq_s16(__rev1_584, __p2_584); \ + __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 3, 2, 1, 0); \ + __ret_584; \ +}) +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmul_n_f64(float64x1_t __p0, 
float64_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { float64x2_t __ret; __ret = __p0 * (float64x2_t) {__p1, __p1}; return __ret; } #else -__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = __rev0 * (float64x2_t) {__p1, __p1}; @@ -50867,13 +52613,13 @@ __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { poly16x8_t __ret; __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); return __ret; } #else -__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { poly16x8_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50884,13 +52630,13 @@ __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); return __ret; } #else -__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50901,13 +52647,13 @@ __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); return __ret; } #else -__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -50918,13 +52664,13 @@ __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); return __ret; } #else -__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) 
uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50935,13 +52681,13 @@ __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); return __ret; } #else -__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50952,13 +52698,13 @@ __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else -__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -50969,13 +52715,13 @@ __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else -__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -50986,181 +52732,181 @@ __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ - uint64x2_t __ret_564; \ - uint32x4_t __s0_564 = __p0_564; \ - uint32x2_t __s1_564 = __p1_564; \ - __ret_564 = vmull_u32(vget_high_u32(__s0_564), splat_lane_u32(__s1_564, __p2_564)); \ - __ret_564; \ +#define vmull_high_lane_u32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ + uint64x2_t __ret_585; \ + uint32x4_t __s0_585 = __p0_585; \ + uint32x2_t __s1_585 = __p1_585; \ + __ret_585 = vmull_u32(vget_high_u32(__s0_585), splat_lane_u32(__s1_585, __p2_585)); \ + __ret_585; \ }) #else -#define vmull_high_lane_u32(__p0_565, __p1_565, __p2_565) __extension__ ({ \ - uint64x2_t __ret_565; \ - uint32x4_t __s0_565 = __p0_565; \ - uint32x2_t __s1_565 = __p1_565; \ - uint32x4_t __rev0_565; __rev0_565 = __builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \ - uint32x2_t __rev1_565; __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 
0); \ - __ret_565 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_565), __noswap_splat_lane_u32(__rev1_565, __p2_565)); \ - __ret_565 = __builtin_shufflevector(__ret_565, __ret_565, 1, 0); \ - __ret_565; \ +#define vmull_high_lane_u32(__p0_586, __p1_586, __p2_586) __extension__ ({ \ + uint64x2_t __ret_586; \ + uint32x4_t __s0_586 = __p0_586; \ + uint32x2_t __s1_586 = __p1_586; \ + uint32x4_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \ + uint32x2_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \ + __ret_586 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_586), __noswap_splat_lane_u32(__rev1_586, __p2_586)); \ + __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \ + __ret_586; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ - uint32x4_t __ret_566; \ - uint16x8_t __s0_566 = __p0_566; \ - uint16x4_t __s1_566 = __p1_566; \ - __ret_566 = vmull_u16(vget_high_u16(__s0_566), splat_lane_u16(__s1_566, __p2_566)); \ - __ret_566; \ +#define vmull_high_lane_u16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ + uint32x4_t __ret_587; \ + uint16x8_t __s0_587 = __p0_587; \ + uint16x4_t __s1_587 = __p1_587; \ + __ret_587 = vmull_u16(vget_high_u16(__s0_587), splat_lane_u16(__s1_587, __p2_587)); \ + __ret_587; \ }) #else -#define vmull_high_lane_u16(__p0_567, __p1_567, __p2_567) __extension__ ({ \ - uint32x4_t __ret_567; \ - uint16x8_t __s0_567 = __p0_567; \ - uint16x4_t __s1_567 = __p1_567; \ - uint16x8_t __rev0_567; __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1_567; __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \ - __ret_567 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_567), __noswap_splat_lane_u16(__rev1_567, __p2_567)); \ - __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \ - __ret_567; \ +#define vmull_high_lane_u16(__p0_588, __p1_588, __p2_588) __extension__ ({ \ + uint32x4_t __ret_588; \ + uint16x8_t __s0_588 = __p0_588; \ + uint16x4_t __s1_588 = __p1_588; \ + uint16x8_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \ + __ret_588 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_588), __noswap_splat_lane_u16(__rev1_588, __p2_588)); \ + __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 3, 2, 1, 0); \ + __ret_588; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s32(__p0_568, __p1_568, __p2_568) __extension__ ({ \ - int64x2_t __ret_568; \ - int32x4_t __s0_568 = __p0_568; \ - int32x2_t __s1_568 = __p1_568; \ - __ret_568 = vmull_s32(vget_high_s32(__s0_568), splat_lane_s32(__s1_568, __p2_568)); \ - __ret_568; \ +#define vmull_high_lane_s32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ + int64x2_t __ret_589; \ + int32x4_t __s0_589 = __p0_589; \ + int32x2_t __s1_589 = __p1_589; \ + __ret_589 = vmull_s32(vget_high_s32(__s0_589), splat_lane_s32(__s1_589, __p2_589)); \ + __ret_589; \ }) #else -#define vmull_high_lane_s32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ - int64x2_t __ret_569; \ - int32x4_t __s0_569 = __p0_569; \ - int32x2_t __s1_569 = __p1_569; \ - int32x4_t __rev0_569; __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \ - int32x2_t __rev1_569; __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 1, 0); \ - __ret_569 = 
__noswap_vmull_s32(__noswap_vget_high_s32(__rev0_569), __noswap_splat_lane_s32(__rev1_569, __p2_569)); \ - __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \ - __ret_569; \ +#define vmull_high_lane_s32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ + int64x2_t __ret_590; \ + int32x4_t __s0_590 = __p0_590; \ + int32x2_t __s1_590 = __p1_590; \ + int32x4_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \ + int32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ + __ret_590 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_590), __noswap_splat_lane_s32(__rev1_590, __p2_590)); \ + __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \ + __ret_590; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s16(__p0_570, __p1_570, __p2_570) __extension__ ({ \ - int32x4_t __ret_570; \ - int16x8_t __s0_570 = __p0_570; \ - int16x4_t __s1_570 = __p1_570; \ - __ret_570 = vmull_s16(vget_high_s16(__s0_570), splat_lane_s16(__s1_570, __p2_570)); \ - __ret_570; \ +#define vmull_high_lane_s16(__p0_591, __p1_591, __p2_591) __extension__ ({ \ + int32x4_t __ret_591; \ + int16x8_t __s0_591 = __p0_591; \ + int16x4_t __s1_591 = __p1_591; \ + __ret_591 = vmull_s16(vget_high_s16(__s0_591), splat_lane_s16(__s1_591, __p2_591)); \ + __ret_591; \ }) #else -#define vmull_high_lane_s16(__p0_571, __p1_571, __p2_571) __extension__ ({ \ - int32x4_t __ret_571; \ - int16x8_t __s0_571 = __p0_571; \ - int16x4_t __s1_571 = __p1_571; \ - int16x8_t __rev0_571; __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_571; __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 3, 2, 1, 0); \ - __ret_571 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_571), __noswap_splat_lane_s16(__rev1_571, __p2_571)); \ - __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \ - __ret_571; \ +#define vmull_high_lane_s16(__p0_592, __p1_592, __p2_592) __extension__ ({ \ + int32x4_t __ret_592; \ + int16x8_t __s0_592 = __p0_592; \ + int16x4_t __s1_592 = __p1_592; \ + int16x8_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \ + __ret_592 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_592), __noswap_splat_lane_s16(__rev1_592, __p2_592)); \ + __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 3, 2, 1, 0); \ + __ret_592; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ - uint64x2_t __ret_572; \ - uint32x4_t __s0_572 = __p0_572; \ - uint32x4_t __s1_572 = __p1_572; \ - __ret_572 = vmull_u32(vget_high_u32(__s0_572), splat_laneq_u32(__s1_572, __p2_572)); \ - __ret_572; \ +#define vmull_high_laneq_u32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ + uint64x2_t __ret_593; \ + uint32x4_t __s0_593 = __p0_593; \ + uint32x4_t __s1_593 = __p1_593; \ + __ret_593 = vmull_u32(vget_high_u32(__s0_593), splat_laneq_u32(__s1_593, __p2_593)); \ + __ret_593; \ }) #else -#define vmull_high_laneq_u32(__p0_573, __p1_573, __p2_573) __extension__ ({ \ - uint64x2_t __ret_573; \ - uint32x4_t __s0_573 = __p0_573; \ - uint32x4_t __s1_573 = __p1_573; \ - uint32x4_t __rev0_573; __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \ - uint32x4_t __rev1_573; __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \ - __ret_573 = 
__noswap_vmull_u32(__noswap_vget_high_u32(__rev0_573), __noswap_splat_laneq_u32(__rev1_573, __p2_573)); \ - __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \ - __ret_573; \ +#define vmull_high_laneq_u32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ + uint64x2_t __ret_594; \ + uint32x4_t __s0_594 = __p0_594; \ + uint32x4_t __s1_594 = __p1_594; \ + uint32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ + uint32x4_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \ + __ret_594 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_594), __noswap_splat_laneq_u32(__rev1_594, __p2_594)); \ + __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 1, 0); \ + __ret_594; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ - uint32x4_t __ret_574; \ - uint16x8_t __s0_574 = __p0_574; \ - uint16x8_t __s1_574 = __p1_574; \ - __ret_574 = vmull_u16(vget_high_u16(__s0_574), splat_laneq_u16(__s1_574, __p2_574)); \ - __ret_574; \ +#define vmull_high_laneq_u16(__p0_595, __p1_595, __p2_595) __extension__ ({ \ + uint32x4_t __ret_595; \ + uint16x8_t __s0_595 = __p0_595; \ + uint16x8_t __s1_595 = __p1_595; \ + __ret_595 = vmull_u16(vget_high_u16(__s0_595), splat_laneq_u16(__s1_595, __p2_595)); \ + __ret_595; \ }) #else -#define vmull_high_laneq_u16(__p0_575, __p1_575, __p2_575) __extension__ ({ \ - uint32x4_t __ret_575; \ - uint16x8_t __s0_575 = __p0_575; \ - uint16x8_t __s1_575 = __p1_575; \ - uint16x8_t __rev0_575; __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_575; __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_575 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_575), __noswap_splat_laneq_u16(__rev1_575, __p2_575)); \ - __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \ - __ret_575; \ +#define vmull_high_laneq_u16(__p0_596, __p1_596, __p2_596) __extension__ ({ \ + uint32x4_t __ret_596; \ + uint16x8_t __s0_596 = __p0_596; \ + uint16x8_t __s1_596 = __p1_596; \ + uint16x8_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_596 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_596), __noswap_splat_laneq_u16(__rev1_596, __p2_596)); \ + __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 3, 2, 1, 0); \ + __ret_596; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ - int64x2_t __ret_576; \ - int32x4_t __s0_576 = __p0_576; \ - int32x4_t __s1_576 = __p1_576; \ - __ret_576 = vmull_s32(vget_high_s32(__s0_576), splat_laneq_s32(__s1_576, __p2_576)); \ - __ret_576; \ +#define vmull_high_laneq_s32(__p0_597, __p1_597, __p2_597) __extension__ ({ \ + int64x2_t __ret_597; \ + int32x4_t __s0_597 = __p0_597; \ + int32x4_t __s1_597 = __p1_597; \ + __ret_597 = vmull_s32(vget_high_s32(__s0_597), splat_laneq_s32(__s1_597, __p2_597)); \ + __ret_597; \ }) #else -#define vmull_high_laneq_s32(__p0_577, __p1_577, __p2_577) __extension__ ({ \ - int64x2_t __ret_577; \ - int32x4_t __s0_577 = __p0_577; \ - int32x4_t __s1_577 = __p1_577; \ - int32x4_t __rev0_577; __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 3, 2, 1, 0); \ - int32x4_t __rev1_577; __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \ - 
__ret_577 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_577), __noswap_splat_laneq_s32(__rev1_577, __p2_577)); \ - __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \ - __ret_577; \ +#define vmull_high_laneq_s32(__p0_598, __p1_598, __p2_598) __extension__ ({ \ + int64x2_t __ret_598; \ + int32x4_t __s0_598 = __p0_598; \ + int32x4_t __s1_598 = __p1_598; \ + int32x4_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \ + int32x4_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \ + __ret_598 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_598), __noswap_splat_laneq_s32(__rev1_598, __p2_598)); \ + __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 1, 0); \ + __ret_598; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ - int32x4_t __ret_578; \ - int16x8_t __s0_578 = __p0_578; \ - int16x8_t __s1_578 = __p1_578; \ - __ret_578 = vmull_s16(vget_high_s16(__s0_578), splat_laneq_s16(__s1_578, __p2_578)); \ - __ret_578; \ +#define vmull_high_laneq_s16(__p0_599, __p1_599, __p2_599) __extension__ ({ \ + int32x4_t __ret_599; \ + int16x8_t __s0_599 = __p0_599; \ + int16x8_t __s1_599 = __p1_599; \ + __ret_599 = vmull_s16(vget_high_s16(__s0_599), splat_laneq_s16(__s1_599, __p2_599)); \ + __ret_599; \ }) #else -#define vmull_high_laneq_s16(__p0_579, __p1_579, __p2_579) __extension__ ({ \ - int32x4_t __ret_579; \ - int16x8_t __s0_579 = __p0_579; \ - int16x8_t __s1_579 = __p1_579; \ - int16x8_t __rev0_579; __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_579; __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_579 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_579), __noswap_splat_laneq_s16(__rev1_579, __p2_579)); \ - __ret_579 = __builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \ - __ret_579; \ +#define vmull_high_laneq_s16(__p0_600, __p1_600, __p2_600) __extension__ ({ \ + int32x4_t __ret_600; \ + int16x8_t __s0_600 = __p0_600; \ + int16x8_t __s1_600 = __p1_600; \ + int16x8_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_600 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_600), __noswap_splat_laneq_s16(__rev1_600, __p2_600)); \ + __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 3, 2, 1, 0); \ + __ret_600; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { uint64x2_t __ret; __ret = vmull_n_u32(vget_high_u32(__p0), __p1); return __ret; } #else -__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); @@ -51170,13 +52916,13 @@ __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { uint32x4_t __ret; __ret = vmull_n_u16(vget_high_u16(__p0), __p1); 
return __ret; } #else -__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); @@ -51186,13 +52932,13 @@ __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vmull_n_s32(vget_high_s32(__p0), __p1); return __ret; } #else -__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); @@ -51202,13 +52948,13 @@ __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vmull_n_s16(vget_high_s16(__p0), __p1); return __ret; } #else -__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); @@ -51218,97 +52964,97 @@ __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_u32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ - uint64x2_t __ret_580; \ - uint32x2_t __s0_580 = __p0_580; \ - uint32x4_t __s1_580 = __p1_580; \ - __ret_580 = vmull_u32(__s0_580, splat_laneq_u32(__s1_580, __p2_580)); \ - __ret_580; \ +#define vmull_laneq_u32(__p0_601, __p1_601, __p2_601) __extension__ ({ \ + uint64x2_t __ret_601; \ + uint32x2_t __s0_601 = __p0_601; \ + uint32x4_t __s1_601 = __p1_601; \ + __ret_601 = vmull_u32(__s0_601, splat_laneq_u32(__s1_601, __p2_601)); \ + __ret_601; \ }) #else -#define vmull_laneq_u32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ - uint64x2_t __ret_581; \ - uint32x2_t __s0_581 = __p0_581; \ - uint32x4_t __s1_581 = __p1_581; \ - uint32x2_t __rev0_581; __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \ - uint32x4_t __rev1_581; __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \ - __ret_581 = __noswap_vmull_u32(__rev0_581, __noswap_splat_laneq_u32(__rev1_581, __p2_581)); \ - __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \ - __ret_581; \ +#define vmull_laneq_u32(__p0_602, __p1_602, __p2_602) __extension__ ({ \ + uint64x2_t __ret_602; \ + uint32x2_t __s0_602 = __p0_602; \ + uint32x4_t __s1_602 = __p1_602; \ + uint32x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ + uint32x4_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \ + __ret_602 = __noswap_vmull_u32(__rev0_602, __noswap_splat_laneq_u32(__rev1_602, __p2_602)); \ + __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \ + __ret_602; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vmull_laneq_u16(__p0_582, __p1_582, __p2_582) __extension__ ({ \ - uint32x4_t __ret_582; \ - uint16x4_t __s0_582 = __p0_582; \ - uint16x8_t __s1_582 = __p1_582; \ - __ret_582 = vmull_u16(__s0_582, splat_laneq_u16(__s1_582, __p2_582)); \ - __ret_582; \ +#define vmull_laneq_u16(__p0_603, __p1_603, __p2_603) __extension__ ({ \ + uint32x4_t __ret_603; \ + uint16x4_t __s0_603 = __p0_603; \ + uint16x8_t __s1_603 = __p1_603; \ + __ret_603 = vmull_u16(__s0_603, splat_laneq_u16(__s1_603, __p2_603)); \ + __ret_603; \ }) #else -#define vmull_laneq_u16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ - uint32x4_t __ret_583; \ - uint16x4_t __s0_583 = __p0_583; \ - uint16x8_t __s1_583 = __p1_583; \ - uint16x4_t __rev0_583; __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \ - uint16x8_t __rev1_583; __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_583 = __noswap_vmull_u16(__rev0_583, __noswap_splat_laneq_u16(__rev1_583, __p2_583)); \ - __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \ - __ret_583; \ +#define vmull_laneq_u16(__p0_604, __p1_604, __p2_604) __extension__ ({ \ + uint32x4_t __ret_604; \ + uint16x4_t __s0_604 = __p0_604; \ + uint16x8_t __s1_604 = __p1_604; \ + uint16x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ + uint16x8_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_604 = __noswap_vmull_u16(__rev0_604, __noswap_splat_laneq_u16(__rev1_604, __p2_604)); \ + __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ + __ret_604; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s32(__p0_584, __p1_584, __p2_584) __extension__ ({ \ - int64x2_t __ret_584; \ - int32x2_t __s0_584 = __p0_584; \ - int32x4_t __s1_584 = __p1_584; \ - __ret_584 = vmull_s32(__s0_584, splat_laneq_s32(__s1_584, __p2_584)); \ - __ret_584; \ +#define vmull_laneq_s32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ + int64x2_t __ret_605; \ + int32x2_t __s0_605 = __p0_605; \ + int32x4_t __s1_605 = __p1_605; \ + __ret_605 = vmull_s32(__s0_605, splat_laneq_s32(__s1_605, __p2_605)); \ + __ret_605; \ }) #else -#define vmull_laneq_s32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ - int64x2_t __ret_585; \ - int32x2_t __s0_585 = __p0_585; \ - int32x4_t __s1_585 = __p1_585; \ - int32x2_t __rev0_585; __rev0_585 = __builtin_shufflevector(__s0_585, __s0_585, 1, 0); \ - int32x4_t __rev1_585; __rev1_585 = __builtin_shufflevector(__s1_585, __s1_585, 3, 2, 1, 0); \ - __ret_585 = __noswap_vmull_s32(__rev0_585, __noswap_splat_laneq_s32(__rev1_585, __p2_585)); \ - __ret_585 = __builtin_shufflevector(__ret_585, __ret_585, 1, 0); \ - __ret_585; \ +#define vmull_laneq_s32(__p0_606, __p1_606, __p2_606) __extension__ ({ \ + int64x2_t __ret_606; \ + int32x2_t __s0_606 = __p0_606; \ + int32x4_t __s1_606 = __p1_606; \ + int32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ + int32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ + __ret_606 = __noswap_vmull_s32(__rev0_606, __noswap_splat_laneq_s32(__rev1_606, __p2_606)); \ + __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ + __ret_606; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s16(__p0_586, __p1_586, __p2_586) __extension__ ({ \ - int32x4_t __ret_586; \ - int16x4_t __s0_586 = __p0_586; \ - int16x8_t __s1_586 = __p1_586; \ - __ret_586 = vmull_s16(__s0_586, splat_laneq_s16(__s1_586, 
__p2_586)); \ - __ret_586; \ +#define vmull_laneq_s16(__p0_607, __p1_607, __p2_607) __extension__ ({ \ + int32x4_t __ret_607; \ + int16x4_t __s0_607 = __p0_607; \ + int16x8_t __s1_607 = __p1_607; \ + __ret_607 = vmull_s16(__s0_607, splat_laneq_s16(__s1_607, __p2_607)); \ + __ret_607; \ }) #else -#define vmull_laneq_s16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ - int32x4_t __ret_587; \ - int16x4_t __s0_587 = __p0_587; \ - int16x8_t __s1_587 = __p1_587; \ - int16x4_t __rev0_587; __rev0_587 = __builtin_shufflevector(__s0_587, __s0_587, 3, 2, 1, 0); \ - int16x8_t __rev1_587; __rev1_587 = __builtin_shufflevector(__s1_587, __s1_587, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_587 = __noswap_vmull_s16(__rev0_587, __noswap_splat_laneq_s16(__rev1_587, __p2_587)); \ - __ret_587 = __builtin_shufflevector(__ret_587, __ret_587, 3, 2, 1, 0); \ - __ret_587; \ +#define vmull_laneq_s16(__p0_608, __p1_608, __p2_608) __extension__ ({ \ + int32x4_t __ret_608; \ + int16x4_t __s0_608 = __p0_608; \ + int16x8_t __s1_608 = __p1_608; \ + int16x4_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 3, 2, 1, 0); \ + int16x8_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_608 = __noswap_vmull_s16(__rev0_608, __noswap_splat_laneq_s16(__rev1_608, __p2_608)); \ + __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 3, 2, 1, 0); \ + __ret_608; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -51316,7 +53062,7 @@ __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; @@ -51324,13 +53070,13 @@ __ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51338,26 +53084,26 @@ __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai 
__attribute__((target("neon"))) float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #endif -__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -51365,220 +53111,220 @@ __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #endif -__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); return __ret; } -__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); return __ret; } -#define vmulxd_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \ - float64_t __ret_588; \ - float64_t __s0_588 = __p0_588; \ - float64x1_t __s1_588 = __p1_588; \ - __ret_588 = vmulxd_f64(__s0_588, vget_lane_f64(__s1_588, __p2_588)); \ - __ret_588; \ +#define vmulxd_lane_f64(__p0_609, __p1_609, __p2_609) __extension__ ({ \ + float64_t __ret_609; \ + float64_t __s0_609 = __p0_609; \ + float64x1_t __s1_609 = __p1_609; \ + __ret_609 = vmulxd_f64(__s0_609, vget_lane_f64(__s1_609, __p2_609)); \ + __ret_609; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmulxs_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ - float32_t __ret_589; \ - float32_t __s0_589 = __p0_589; \ - float32x2_t __s1_589 = __p1_589; \ - __ret_589 = vmulxs_f32(__s0_589, vget_lane_f32(__s1_589, __p2_589)); \ - __ret_589; \ +#define vmulxs_lane_f32(__p0_610, __p1_610, __p2_610) __extension__ ({ \ + float32_t __ret_610; \ + float32_t __s0_610 = __p0_610; \ + float32x2_t __s1_610 = __p1_610; \ + __ret_610 = vmulxs_f32(__s0_610, vget_lane_f32(__s1_610, __p2_610)); \ + __ret_610; \ }) #else -#define vmulxs_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ - float32_t __ret_590; \ - float32_t __s0_590 = __p0_590; \ - float32x2_t __s1_590 = __p1_590; \ - float32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ - __ret_590 = vmulxs_f32(__s0_590, __noswap_vget_lane_f32(__rev1_590, __p2_590)); \ - __ret_590; \ +#define 
vmulxs_lane_f32(__p0_611, __p1_611, __p2_611) __extension__ ({ \ + float32_t __ret_611; \ + float32_t __s0_611 = __p0_611; \ + float32x2_t __s1_611 = __p1_611; \ + float32x2_t __rev1_611; __rev1_611 = __builtin_shufflevector(__s1_611, __s1_611, 1, 0); \ + __ret_611 = vmulxs_f32(__s0_611, __noswap_vget_lane_f32(__rev1_611, __p2_611)); \ + __ret_611; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f64(__p0_591, __p1_591, __p2_591) __extension__ ({ \ - float64x2_t __ret_591; \ - float64x2_t __s0_591 = __p0_591; \ - float64x1_t __s1_591 = __p1_591; \ - __ret_591 = vmulxq_f64(__s0_591, splatq_lane_f64(__s1_591, __p2_591)); \ - __ret_591; \ +#define vmulxq_lane_f64(__p0_612, __p1_612, __p2_612) __extension__ ({ \ + float64x2_t __ret_612; \ + float64x2_t __s0_612 = __p0_612; \ + float64x1_t __s1_612 = __p1_612; \ + __ret_612 = vmulxq_f64(__s0_612, splatq_lane_f64(__s1_612, __p2_612)); \ + __ret_612; \ }) #else -#define vmulxq_lane_f64(__p0_592, __p1_592, __p2_592) __extension__ ({ \ - float64x2_t __ret_592; \ - float64x2_t __s0_592 = __p0_592; \ - float64x1_t __s1_592 = __p1_592; \ - float64x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ - __ret_592 = __noswap_vmulxq_f64(__rev0_592, __noswap_splatq_lane_f64(__s1_592, __p2_592)); \ - __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ - __ret_592; \ +#define vmulxq_lane_f64(__p0_613, __p1_613, __p2_613) __extension__ ({ \ + float64x2_t __ret_613; \ + float64x2_t __s0_613 = __p0_613; \ + float64x1_t __s1_613 = __p1_613; \ + float64x2_t __rev0_613; __rev0_613 = __builtin_shufflevector(__s0_613, __s0_613, 1, 0); \ + __ret_613 = __noswap_vmulxq_f64(__rev0_613, __noswap_splatq_lane_f64(__s1_613, __p2_613)); \ + __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 1, 0); \ + __ret_613; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ - float32x4_t __ret_593; \ - float32x4_t __s0_593 = __p0_593; \ - float32x2_t __s1_593 = __p1_593; \ - __ret_593 = vmulxq_f32(__s0_593, splatq_lane_f32(__s1_593, __p2_593)); \ - __ret_593; \ +#define vmulxq_lane_f32(__p0_614, __p1_614, __p2_614) __extension__ ({ \ + float32x4_t __ret_614; \ + float32x4_t __s0_614 = __p0_614; \ + float32x2_t __s1_614 = __p1_614; \ + __ret_614 = vmulxq_f32(__s0_614, splatq_lane_f32(__s1_614, __p2_614)); \ + __ret_614; \ }) #else -#define vmulxq_lane_f32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ - float32x4_t __ret_594; \ - float32x4_t __s0_594 = __p0_594; \ - float32x2_t __s1_594 = __p1_594; \ - float32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ - float32x2_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \ - __ret_594 = __noswap_vmulxq_f32(__rev0_594, __noswap_splatq_lane_f32(__rev1_594, __p2_594)); \ - __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ - __ret_594; \ +#define vmulxq_lane_f32(__p0_615, __p1_615, __p2_615) __extension__ ({ \ + float32x4_t __ret_615; \ + float32x4_t __s0_615 = __p0_615; \ + float32x2_t __s1_615 = __p1_615; \ + float32x4_t __rev0_615; __rev0_615 = __builtin_shufflevector(__s0_615, __s0_615, 3, 2, 1, 0); \ + float32x2_t __rev1_615; __rev1_615 = __builtin_shufflevector(__s1_615, __s1_615, 1, 0); \ + __ret_615 = __noswap_vmulxq_f32(__rev0_615, __noswap_splatq_lane_f32(__rev1_615, __p2_615)); \ + __ret_615 = __builtin_shufflevector(__ret_615, __ret_615, 3, 2, 1, 0); \ + __ret_615; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vmulx_lane_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \ - float32x2_t __ret_595; \ - float32x2_t __s0_595 = __p0_595; \ - float32x2_t __s1_595 = __p1_595; \ - __ret_595 = vmulx_f32(__s0_595, splat_lane_f32(__s1_595, __p2_595)); \ - __ret_595; \ +#define vmulx_lane_f32(__p0_616, __p1_616, __p2_616) __extension__ ({ \ + float32x2_t __ret_616; \ + float32x2_t __s0_616 = __p0_616; \ + float32x2_t __s1_616 = __p1_616; \ + __ret_616 = vmulx_f32(__s0_616, splat_lane_f32(__s1_616, __p2_616)); \ + __ret_616; \ }) #else -#define vmulx_lane_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \ - float32x2_t __ret_596; \ - float32x2_t __s0_596 = __p0_596; \ - float32x2_t __s1_596 = __p1_596; \ - float32x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ - float32x2_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \ - __ret_596 = __noswap_vmulx_f32(__rev0_596, __noswap_splat_lane_f32(__rev1_596, __p2_596)); \ - __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ - __ret_596; \ +#define vmulx_lane_f32(__p0_617, __p1_617, __p2_617) __extension__ ({ \ + float32x2_t __ret_617; \ + float32x2_t __s0_617 = __p0_617; \ + float32x2_t __s1_617 = __p1_617; \ + float32x2_t __rev0_617; __rev0_617 = __builtin_shufflevector(__s0_617, __s0_617, 1, 0); \ + float32x2_t __rev1_617; __rev1_617 = __builtin_shufflevector(__s1_617, __s1_617, 1, 0); \ + __ret_617 = __noswap_vmulx_f32(__rev0_617, __noswap_splat_lane_f32(__rev1_617, __p2_617)); \ + __ret_617 = __builtin_shufflevector(__ret_617, __ret_617, 1, 0); \ + __ret_617; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxd_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \ - float64_t __ret_597; \ - float64_t __s0_597 = __p0_597; \ - float64x2_t __s1_597 = __p1_597; \ - __ret_597 = vmulxd_f64(__s0_597, vgetq_lane_f64(__s1_597, __p2_597)); \ - __ret_597; \ +#define vmulxd_laneq_f64(__p0_618, __p1_618, __p2_618) __extension__ ({ \ + float64_t __ret_618; \ + float64_t __s0_618 = __p0_618; \ + float64x2_t __s1_618 = __p1_618; \ + __ret_618 = vmulxd_f64(__s0_618, vgetq_lane_f64(__s1_618, __p2_618)); \ + __ret_618; \ }) #else -#define vmulxd_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \ - float64_t __ret_598; \ - float64_t __s0_598 = __p0_598; \ - float64x2_t __s1_598 = __p1_598; \ - float64x2_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \ - __ret_598 = vmulxd_f64(__s0_598, __noswap_vgetq_lane_f64(__rev1_598, __p2_598)); \ - __ret_598; \ +#define vmulxd_laneq_f64(__p0_619, __p1_619, __p2_619) __extension__ ({ \ + float64_t __ret_619; \ + float64_t __s0_619 = __p0_619; \ + float64x2_t __s1_619 = __p1_619; \ + float64x2_t __rev1_619; __rev1_619 = __builtin_shufflevector(__s1_619, __s1_619, 1, 0); \ + __ret_619 = vmulxd_f64(__s0_619, __noswap_vgetq_lane_f64(__rev1_619, __p2_619)); \ + __ret_619; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxs_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \ - float32_t __ret_599; \ - float32_t __s0_599 = __p0_599; \ - float32x4_t __s1_599 = __p1_599; \ - __ret_599 = vmulxs_f32(__s0_599, vgetq_lane_f32(__s1_599, __p2_599)); \ - __ret_599; \ +#define vmulxs_laneq_f32(__p0_620, __p1_620, __p2_620) __extension__ ({ \ + float32_t __ret_620; \ + float32_t __s0_620 = __p0_620; \ + float32x4_t __s1_620 = __p1_620; \ + __ret_620 = vmulxs_f32(__s0_620, vgetq_lane_f32(__s1_620, __p2_620)); \ + __ret_620; \ }) #else -#define vmulxs_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \ - float32_t 
__ret_600; \ - float32_t __s0_600 = __p0_600; \ - float32x4_t __s1_600 = __p1_600; \ - float32x4_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \ - __ret_600 = vmulxs_f32(__s0_600, __noswap_vgetq_lane_f32(__rev1_600, __p2_600)); \ - __ret_600; \ +#define vmulxs_laneq_f32(__p0_621, __p1_621, __p2_621) __extension__ ({ \ + float32_t __ret_621; \ + float32_t __s0_621 = __p0_621; \ + float32x4_t __s1_621 = __p1_621; \ + float32x4_t __rev1_621; __rev1_621 = __builtin_shufflevector(__s1_621, __s1_621, 3, 2, 1, 0); \ + __ret_621 = vmulxs_f32(__s0_621, __noswap_vgetq_lane_f32(__rev1_621, __p2_621)); \ + __ret_621; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f64(__p0_601, __p1_601, __p2_601) __extension__ ({ \ - float64x2_t __ret_601; \ - float64x2_t __s0_601 = __p0_601; \ - float64x2_t __s1_601 = __p1_601; \ - __ret_601 = vmulxq_f64(__s0_601, splatq_laneq_f64(__s1_601, __p2_601)); \ - __ret_601; \ +#define vmulxq_laneq_f64(__p0_622, __p1_622, __p2_622) __extension__ ({ \ + float64x2_t __ret_622; \ + float64x2_t __s0_622 = __p0_622; \ + float64x2_t __s1_622 = __p1_622; \ + __ret_622 = vmulxq_f64(__s0_622, splatq_laneq_f64(__s1_622, __p2_622)); \ + __ret_622; \ }) #else -#define vmulxq_laneq_f64(__p0_602, __p1_602, __p2_602) __extension__ ({ \ - float64x2_t __ret_602; \ - float64x2_t __s0_602 = __p0_602; \ - float64x2_t __s1_602 = __p1_602; \ - float64x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ - float64x2_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 1, 0); \ - __ret_602 = __noswap_vmulxq_f64(__rev0_602, __noswap_splatq_laneq_f64(__rev1_602, __p2_602)); \ - __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \ - __ret_602; \ +#define vmulxq_laneq_f64(__p0_623, __p1_623, __p2_623) __extension__ ({ \ + float64x2_t __ret_623; \ + float64x2_t __s0_623 = __p0_623; \ + float64x2_t __s1_623 = __p1_623; \ + float64x2_t __rev0_623; __rev0_623 = __builtin_shufflevector(__s0_623, __s0_623, 1, 0); \ + float64x2_t __rev1_623; __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \ + __ret_623 = __noswap_vmulxq_f64(__rev0_623, __noswap_splatq_laneq_f64(__rev1_623, __p2_623)); \ + __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0); \ + __ret_623; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f32(__p0_603, __p1_603, __p2_603) __extension__ ({ \ - float32x4_t __ret_603; \ - float32x4_t __s0_603 = __p0_603; \ - float32x4_t __s1_603 = __p1_603; \ - __ret_603 = vmulxq_f32(__s0_603, splatq_laneq_f32(__s1_603, __p2_603)); \ - __ret_603; \ +#define vmulxq_laneq_f32(__p0_624, __p1_624, __p2_624) __extension__ ({ \ + float32x4_t __ret_624; \ + float32x4_t __s0_624 = __p0_624; \ + float32x4_t __s1_624 = __p1_624; \ + __ret_624 = vmulxq_f32(__s0_624, splatq_laneq_f32(__s1_624, __p2_624)); \ + __ret_624; \ }) #else -#define vmulxq_laneq_f32(__p0_604, __p1_604, __p2_604) __extension__ ({ \ - float32x4_t __ret_604; \ - float32x4_t __s0_604 = __p0_604; \ - float32x4_t __s1_604 = __p1_604; \ - float32x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ - float32x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \ - __ret_604 = __noswap_vmulxq_f32(__rev0_604, __noswap_splatq_laneq_f32(__rev1_604, __p2_604)); \ - __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ - __ret_604; \ +#define vmulxq_laneq_f32(__p0_625, __p1_625, __p2_625) __extension__ ({ \ + float32x4_t __ret_625; \ 
+ float32x4_t __s0_625 = __p0_625; \ + float32x4_t __s1_625 = __p1_625; \ + float32x4_t __rev0_625; __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 3, 2, 1, 0); \ + float32x4_t __rev1_625; __rev1_625 = __builtin_shufflevector(__s1_625, __s1_625, 3, 2, 1, 0); \ + __ret_625 = __noswap_vmulxq_f32(__rev0_625, __noswap_splatq_laneq_f32(__rev1_625, __p2_625)); \ + __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 3, 2, 1, 0); \ + __ret_625; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ - float32x2_t __ret_605; \ - float32x2_t __s0_605 = __p0_605; \ - float32x4_t __s1_605 = __p1_605; \ - __ret_605 = vmulx_f32(__s0_605, splat_laneq_f32(__s1_605, __p2_605)); \ - __ret_605; \ +#define vmulx_laneq_f32(__p0_626, __p1_626, __p2_626) __extension__ ({ \ + float32x2_t __ret_626; \ + float32x2_t __s0_626 = __p0_626; \ + float32x4_t __s1_626 = __p1_626; \ + __ret_626 = vmulx_f32(__s0_626, splat_laneq_f32(__s1_626, __p2_626)); \ + __ret_626; \ }) #else -#define vmulx_laneq_f32(__p0_606, __p1_606, __p2_606) __extension__ ({ \ - float32x2_t __ret_606; \ - float32x2_t __s0_606 = __p0_606; \ - float32x4_t __s1_606 = __p1_606; \ - float32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ - float32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ - __ret_606 = __noswap_vmulx_f32(__rev0_606, __noswap_splat_laneq_f32(__rev1_606, __p2_606)); \ - __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ - __ret_606; \ +#define vmulx_laneq_f32(__p0_627, __p1_627, __p2_627) __extension__ ({ \ + float32x2_t __ret_627; \ + float32x2_t __s0_627 = __p0_627; \ + float32x4_t __s1_627 = __p1_627; \ + float32x2_t __rev0_627; __rev0_627 = __builtin_shufflevector(__s0_627, __s0_627, 1, 0); \ + float32x4_t __rev1_627; __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 3, 2, 1, 0); \ + __ret_627 = __noswap_vmulx_f32(__rev0_627, __noswap_splat_laneq_f32(__rev1_627, __p2_627)); \ + __ret_627 = __builtin_shufflevector(__ret_627, __ret_627, 1, 0); \ + __ret_627; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vnegq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = -__p0; return __ret; } #else -__ai float64x2_t vnegq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; @@ -51588,13 +53334,13 @@ __ai float64x2_t vnegq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vnegq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = -__p0; return __ret; } #else -__ai int64x2_t vnegq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = -__rev0; @@ -51603,29 +53349,29 @@ __ai int64x2_t vnegq_s64(int64x2_t __p0) { } #endif -__ai float64x1_t vneg_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vneg_f64(float64x1_t __p0) { float64x1_t __ret; __ret = -__p0; return __ret; } -__ai int64x1_t vneg_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vneg_s64(int64x1_t __p0) { int64x1_t __ret; __ret = -__p0; return __ret; } -__ai int64_t vnegd_s64(int64_t __p0) { +__ai 
__attribute__((target("neon"))) int64_t vnegd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51636,13 +53382,13 @@ __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51653,13 +53399,13 @@ __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -51670,13 +53416,13 @@ __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51687,13 +53433,13 @@ __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai 
__attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51704,13 +53450,13 @@ __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -51721,13 +53467,13 @@ __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51738,13 +53484,13 @@ __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51755,13 +53501,13 @@ __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -51772,13 +53518,13 @@ __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; 
__ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51789,13 +53535,13 @@ __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64_t vpaddd_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0); return __ret; } #else -__ai uint64_t vpaddd_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) { uint64_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0); @@ -51804,13 +53550,13 @@ __ai uint64_t vpaddd_u64(uint64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpaddd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0); return __ret; } #else -__ai float64_t vpaddd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0); @@ -51819,13 +53565,13 @@ __ai float64_t vpaddd_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64_t vpaddd_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0); return __ret; } #else -__ai int64_t vpaddd_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) { int64_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0); @@ -51834,13 +53580,13 @@ __ai int64_t vpaddd_s64(int64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpadds_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); return __ret; } #else -__ai float32_t vpadds_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); @@ -51849,13 +53595,13 @@ __ai float32_t vpadds_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51866,13 +53612,13 @@ __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51883,13 +53629,13 @@ __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51900,13 +53646,13 @@ __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51917,13 +53663,13 @@ __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -51934,13 +53680,13 @@ __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { 
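/* Editor's note, not part of the generated header: the only substantive edit
 * in these arm_neon.h hunks is the one visible on the surrounding pair of
 * lines: LLVM 19 tags every __ai (static, always_inline) intrinsic with a
 * function-level __attribute__((target("neon"))), making the required ISA
 * feature explicit per function instead of relying on the translation unit's
 * global target features. A minimal sketch of the same idiom, assuming
 * arm_neon.h is available (add4 is a hypothetical example, not from the
 * header):
 *
 *   #include <arm_neon.h>
 *
 *   __attribute__((always_inline, target("neon")))
 *   static inline uint32x4_t add4(uint32x4_t a, uint32x4_t b) {
 *     return vaddq_u32(a, b);  // body is compiled with NEON codegen even if
 *   }                          // this file's -march baseline lacks NEON;
 *                              // an always_inline call from a caller without
 *                              // the feature is still rejected by clang
 *
 * The rest of the churn (__ret_564 becoming __ret_585 and so on) is
 * mechanical: the unique suffixes that keep macro-local temporaries from
 * colliding are numbered sequentially across the whole generated header, and
 * the extra intrinsics emitted earlier in the LLVM 19 header shift every
 * later counter. */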
+__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51951,13 +53697,13 @@ __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -51968,13 +53714,13 @@ __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -51985,13 +53731,13 @@ __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpmaxqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0); return __ret; } #else -__ai float64_t vpmaxqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); @@ -52000,13 +53746,13 @@ __ai float64_t vpmaxqd_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmaxs_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); return __ret; } #else -__ai float32_t vpmaxs_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); @@ -52015,13 +53761,13 @@ __ai float32_t vpmaxs_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; 
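/* Editor's note, not part of the generated header: this is the big-endian
 * arm of the #ifdef __LITTLE_ENDIAN__ pair that wraps each intrinsic. The
 * pattern, visible throughout these hunks: reverse the lanes of every vector
 * argument with __builtin_shufflevector, run the little-endian-order body
 * (via the builtin or a __noswap_-prefixed helper), then reverse the result
 * back. Sketched for a hypothetical two-lane input v:
 *
 *   float64x2_t rev = __builtin_shufflevector(v, v, 1, 0);   // swap lanes
 *   float64x2_t r = (float64x2_t) __builtin_neon_vpmaxq_v(
 *       (int8x16_t)rev, (int8x16_t)rev, 42);                 // LE-order op
 *   return __builtin_shufflevector(r, r, 1, 0);              // swap back
 */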
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -52032,13 +53778,13 @@ __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52049,13 +53795,13 @@ __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -52066,13 +53812,13 @@ __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); return __ret; } #else -__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); @@ -52081,13 +53827,13 @@ __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmaxnms_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0); return __ret; } #else -__ai float32_t vpmaxnms_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); @@ -52096,13 +53842,13 @@ __ai float32_t vpmaxnms_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52113,13 +53859,13 @@ __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52130,13 +53876,13 @@ __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52147,13 +53893,13 @@ __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52164,13 +53910,13 @@ __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -52181,13 +53927,13 @@ __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 
41); return __ret; } #else -__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52198,13 +53944,13 @@ __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52215,13 +53961,13 @@ __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52232,13 +53978,13 @@ __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpminqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0); return __ret; } #else -__ai float64_t vpminqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); @@ -52247,13 +53993,13 @@ __ai float64_t vpminqd_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmins_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); return __ret; } #else -__ai float32_t vpmins_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); @@ -52262,13 +54008,13 @@ __ai float32_t vpmins_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) 
float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -52279,13 +54025,13 @@ __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52296,13 +54042,13 @@ __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -52313,13 +54059,13 @@ __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpminnmqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); return __ret; } #else -__ai float64_t vpminnmqd_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) { float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); @@ -52328,13 +54074,13 @@ __ai float64_t vpminnmqd_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpminnms_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); return __ret; } #else -__ai float32_t vpminnms_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) { float32_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); @@ -52343,13 +54089,13 @@ __ai float32_t vpminnms_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqabsq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vqabsq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) 
__builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); @@ -52358,89 +54104,89 @@ __ai int64x2_t vqabsq_s64(int64x2_t __p0) { } #endif -__ai int64x1_t vqabs_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vqabs_s64(int64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); return __ret; } -__ai int8_t vqabsb_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vqabsb_s8(int8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); return __ret; } -__ai int32_t vqabss_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vqabss_s32(int32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); return __ret; } -__ai int64_t vqabsd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vqabsd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); return __ret; } -__ai int16_t vqabsh_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16_t vqabsh_s16(int16_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); return __ret; } -__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { +__ai __attribute__((target("neon"))) uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); return __ret; } -__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); return __ret; } -__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); return __ret; } -__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); return __ret; } -__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); return __ret; } -__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqadds_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); return __ret; } -__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); return __ret; } -__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); return __ret; } -__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); return __ret; } -__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t 
vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52452,13 +54198,13 @@ __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52470,109 +54216,109 @@ __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \ - int64x2_t __ret_607; \ - int64x2_t __s0_607 = __p0_607; \ - int32x4_t __s1_607 = __p1_607; \ - int32x2_t __s2_607 = __p2_607; \ - __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_lane_s32(__s2_607, __p3_607)); \ - __ret_607; \ +#define vqdmlal_high_lane_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ + int64x2_t __ret_628; \ + int64x2_t __s0_628 = __p0_628; \ + int32x4_t __s1_628 = __p1_628; \ + int32x2_t __s2_628 = __p2_628; \ + __ret_628 = vqdmlal_s32(__s0_628, vget_high_s32(__s1_628), splat_lane_s32(__s2_628, __p3_628)); \ + __ret_628; \ }) #else -#define vqdmlal_high_lane_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \ - int64x2_t __ret_608; \ - int64x2_t __s0_608 = __p0_608; \ - int32x4_t __s1_608 = __p1_608; \ - int32x2_t __s2_608 = __p2_608; \ - int64x2_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \ - int32x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \ - int32x2_t __rev2_608; __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 1, 0); \ - __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_lane_s32(__rev2_608, __p3_608)); \ - __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \ - __ret_608; \ +#define vqdmlal_high_lane_s32(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ + int64x2_t __ret_629; \ + int64x2_t __s0_629 = __p0_629; \ + int32x4_t __s1_629 = __p1_629; \ + int32x2_t __s2_629 = __p2_629; \ + int64x2_t __rev0_629; __rev0_629 = __builtin_shufflevector(__s0_629, __s0_629, 1, 0); \ + int32x4_t __rev1_629; __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \ + int32x2_t __rev2_629; __rev2_629 = __builtin_shufflevector(__s2_629, __s2_629, 1, 0); \ + __ret_629 = __noswap_vqdmlal_s32(__rev0_629, 
__noswap_vget_high_s32(__rev1_629), __noswap_splat_lane_s32(__rev2_629, __p3_629)); \ + __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 1, 0); \ + __ret_629; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \ - int32x4_t __ret_609; \ - int32x4_t __s0_609 = __p0_609; \ - int16x8_t __s1_609 = __p1_609; \ - int16x4_t __s2_609 = __p2_609; \ - __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_lane_s16(__s2_609, __p3_609)); \ - __ret_609; \ +#define vqdmlal_high_lane_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ + int32x4_t __ret_630; \ + int32x4_t __s0_630 = __p0_630; \ + int16x8_t __s1_630 = __p1_630; \ + int16x4_t __s2_630 = __p2_630; \ + __ret_630 = vqdmlal_s16(__s0_630, vget_high_s16(__s1_630), splat_lane_s16(__s2_630, __p3_630)); \ + __ret_630; \ }) #else -#define vqdmlal_high_lane_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \ - int32x4_t __ret_610; \ - int32x4_t __s0_610 = __p0_610; \ - int16x8_t __s1_610 = __p1_610; \ - int16x4_t __s2_610 = __p2_610; \ - int32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \ - int16x8_t __rev1_610; __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_610; __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 3, 2, 1, 0); \ - __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_lane_s16(__rev2_610, __p3_610)); \ - __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \ - __ret_610; \ +#define vqdmlal_high_lane_s16(__p0_631, __p1_631, __p2_631, __p3_631) __extension__ ({ \ + int32x4_t __ret_631; \ + int32x4_t __s0_631 = __p0_631; \ + int16x8_t __s1_631 = __p1_631; \ + int16x4_t __s2_631 = __p2_631; \ + int32x4_t __rev0_631; __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \ + int16x8_t __rev1_631; __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_631; __rev2_631 = __builtin_shufflevector(__s2_631, __s2_631, 3, 2, 1, 0); \ + __ret_631 = __noswap_vqdmlal_s16(__rev0_631, __noswap_vget_high_s16(__rev1_631), __noswap_splat_lane_s16(__rev2_631, __p3_631)); \ + __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \ + __ret_631; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \ - int64x2_t __ret_611; \ - int64x2_t __s0_611 = __p0_611; \ - int32x4_t __s1_611 = __p1_611; \ - int32x4_t __s2_611 = __p2_611; \ - __ret_611 = vqdmlal_s32(__s0_611, vget_high_s32(__s1_611), splat_laneq_s32(__s2_611, __p3_611)); \ - __ret_611; \ +#define vqdmlal_high_laneq_s32(__p0_632, __p1_632, __p2_632, __p3_632) __extension__ ({ \ + int64x2_t __ret_632; \ + int64x2_t __s0_632 = __p0_632; \ + int32x4_t __s1_632 = __p1_632; \ + int32x4_t __s2_632 = __p2_632; \ + __ret_632 = vqdmlal_s32(__s0_632, vget_high_s32(__s1_632), splat_laneq_s32(__s2_632, __p3_632)); \ + __ret_632; \ }) #else -#define vqdmlal_high_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \ - int64x2_t __ret_612; \ - int64x2_t __s0_612 = __p0_612; \ - int32x4_t __s1_612 = __p1_612; \ - int32x4_t __s2_612 = __p2_612; \ - int64x2_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \ - int32x4_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 3, 2, 1, 0); \ - int32x4_t __rev2_612; __rev2_612 = 
__builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \ - __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __noswap_vget_high_s32(__rev1_612), __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \ - __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \ - __ret_612; \ +#define vqdmlal_high_laneq_s32(__p0_633, __p1_633, __p2_633, __p3_633) __extension__ ({ \ + int64x2_t __ret_633; \ + int64x2_t __s0_633 = __p0_633; \ + int32x4_t __s1_633 = __p1_633; \ + int32x4_t __s2_633 = __p2_633; \ + int64x2_t __rev0_633; __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 1, 0); \ + int32x4_t __rev1_633; __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 3, 2, 1, 0); \ + int32x4_t __rev2_633; __rev2_633 = __builtin_shufflevector(__s2_633, __s2_633, 3, 2, 1, 0); \ + __ret_633 = __noswap_vqdmlal_s32(__rev0_633, __noswap_vget_high_s32(__rev1_633), __noswap_splat_laneq_s32(__rev2_633, __p3_633)); \ + __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 1, 0); \ + __ret_633; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \ - int32x4_t __ret_613; \ - int32x4_t __s0_613 = __p0_613; \ - int16x8_t __s1_613 = __p1_613; \ - int16x8_t __s2_613 = __p2_613; \ - __ret_613 = vqdmlal_s16(__s0_613, vget_high_s16(__s1_613), splat_laneq_s16(__s2_613, __p3_613)); \ - __ret_613; \ +#define vqdmlal_high_laneq_s16(__p0_634, __p1_634, __p2_634, __p3_634) __extension__ ({ \ + int32x4_t __ret_634; \ + int32x4_t __s0_634 = __p0_634; \ + int16x8_t __s1_634 = __p1_634; \ + int16x8_t __s2_634 = __p2_634; \ + __ret_634 = vqdmlal_s16(__s0_634, vget_high_s16(__s1_634), splat_laneq_s16(__s2_634, __p3_634)); \ + __ret_634; \ }) #else -#define vqdmlal_high_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \ - int32x4_t __ret_614; \ - int32x4_t __s0_614 = __p0_614; \ - int16x8_t __s1_614 = __p1_614; \ - int16x8_t __s2_614 = __p2_614; \ - int32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \ - int16x8_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_614; __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_614 = __noswap_vqdmlal_s16(__rev0_614, __noswap_vget_high_s16(__rev1_614), __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \ - __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \ - __ret_614; \ +#define vqdmlal_high_laneq_s16(__p0_635, __p1_635, __p2_635, __p3_635) __extension__ ({ \ + int32x4_t __ret_635; \ + int32x4_t __s0_635 = __p0_635; \ + int16x8_t __s1_635 = __p1_635; \ + int16x8_t __s2_635 = __p2_635; \ + int32x4_t __rev0_635; __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 3, 2, 1, 0); \ + int16x8_t __rev1_635; __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_635; __rev2_635 = __builtin_shufflevector(__s2_635, __s2_635, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_635 = __noswap_vqdmlal_s16(__rev0_635, __noswap_vget_high_s16(__rev1_635), __noswap_splat_laneq_s16(__rev2_635, __p3_635)); \ + __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0); \ + __ret_635; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } 
#else -__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52583,13 +54329,13 @@ __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else -__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52684,71 +54430,71 @@ __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \ - int64x2_t __ret_615; \ - int64x2_t __s0_615 = __p0_615; \ - int32x2_t __s1_615 = __p1_615; \ - int32x4_t __s2_615 = __p2_615; \ - __ret_615 = vqdmlal_s32(__s0_615, __s1_615, splat_laneq_s32(__s2_615, __p3_615)); \ - __ret_615; \ +#define vqdmlal_laneq_s32(__p0_636, __p1_636, __p2_636, __p3_636) __extension__ ({ \ + int64x2_t __ret_636; \ + int64x2_t __s0_636 = __p0_636; \ + int32x2_t __s1_636 = __p1_636; \ + int32x4_t __s2_636 = __p2_636; \ + __ret_636 = vqdmlal_s32(__s0_636, __s1_636, splat_laneq_s32(__s2_636, __p3_636)); \ + __ret_636; \ }) #else -#define vqdmlal_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \ - int64x2_t __ret_616; \ - int64x2_t __s0_616 = __p0_616; \ - int32x2_t __s1_616 = __p1_616; \ - int32x4_t __s2_616 = __p2_616; \ - int64x2_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \ - int32x2_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \ - int32x4_t __rev2_616; __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \ - __ret_616 = __noswap_vqdmlal_s32(__rev0_616, __rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \ - __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \ - __ret_616; \ +#define vqdmlal_laneq_s32(__p0_637, __p1_637, __p2_637, __p3_637) __extension__ ({ \ + int64x2_t __ret_637; \ + int64x2_t __s0_637 = __p0_637; \ + int32x2_t __s1_637 = __p1_637; \ + int32x4_t __s2_637 = __p2_637; \ + int64x2_t __rev0_637; __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 1, 0); \ + int32x2_t __rev1_637; __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 1, 0); \ + int32x4_t __rev2_637; __rev2_637 = __builtin_shufflevector(__s2_637, __s2_637, 3, 2, 1, 0); \ + __ret_637 = __noswap_vqdmlal_s32(__rev0_637, __rev1_637, __noswap_splat_laneq_s32(__rev2_637, __p3_637)); \ + __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 1, 0); \ + __ret_637; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \ - int32x4_t __ret_617; \ - int32x4_t __s0_617 = __p0_617; \ - int16x4_t __s1_617 = __p1_617; \ - 
int16x8_t __s2_617 = __p2_617; \ - __ret_617 = vqdmlal_s16(__s0_617, __s1_617, splat_laneq_s16(__s2_617, __p3_617)); \ - __ret_617; \ +#define vqdmlal_laneq_s16(__p0_638, __p1_638, __p2_638, __p3_638) __extension__ ({ \ + int32x4_t __ret_638; \ + int32x4_t __s0_638 = __p0_638; \ + int16x4_t __s1_638 = __p1_638; \ + int16x8_t __s2_638 = __p2_638; \ + __ret_638 = vqdmlal_s16(__s0_638, __s1_638, splat_laneq_s16(__s2_638, __p3_638)); \ + __ret_638; \ }) #else -#define vqdmlal_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \ - int32x4_t __ret_618; \ - int32x4_t __s0_618 = __p0_618; \ - int16x4_t __s1_618 = __p1_618; \ - int16x8_t __s2_618 = __p2_618; \ - int32x4_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \ - int16x4_t __rev1_618; __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \ - int16x8_t __rev2_618; __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_618 = __noswap_vqdmlal_s16(__rev0_618, __rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \ - __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \ - __ret_618; \ +#define vqdmlal_laneq_s16(__p0_639, __p1_639, __p2_639, __p3_639) __extension__ ({ \ + int32x4_t __ret_639; \ + int32x4_t __s0_639 = __p0_639; \ + int16x4_t __s1_639 = __p1_639; \ + int16x8_t __s2_639 = __p2_639; \ + int32x4_t __rev0_639; __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \ + int16x4_t __rev1_639; __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \ + int16x8_t __rev2_639; __rev2_639 = __builtin_shufflevector(__s2_639, __s2_639, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_639 = __noswap_vqdmlal_s16(__rev0_639, __rev1_639, __noswap_splat_laneq_s16(__rev2_639, __p3_639)); \ + __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \ + __ret_639; \ }) #endif -__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); return __ret; } -__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52760,13 +54506,13 @@ __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), 
vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52778,109 +54524,109 @@ __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \ - int64x2_t __ret_619; \ - int64x2_t __s0_619 = __p0_619; \ - int32x4_t __s1_619 = __p1_619; \ - int32x2_t __s2_619 = __p2_619; \ - __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_lane_s32(__s2_619, __p3_619)); \ - __ret_619; \ +#define vqdmlsl_high_lane_s32(__p0_640, __p1_640, __p2_640, __p3_640) __extension__ ({ \ + int64x2_t __ret_640; \ + int64x2_t __s0_640 = __p0_640; \ + int32x4_t __s1_640 = __p1_640; \ + int32x2_t __s2_640 = __p2_640; \ + __ret_640 = vqdmlsl_s32(__s0_640, vget_high_s32(__s1_640), splat_lane_s32(__s2_640, __p3_640)); \ + __ret_640; \ }) #else -#define vqdmlsl_high_lane_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \ - int64x2_t __ret_620; \ - int64x2_t __s0_620 = __p0_620; \ - int32x4_t __s1_620 = __p1_620; \ - int32x2_t __s2_620 = __p2_620; \ - int64x2_t __rev0_620; __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \ - int32x4_t __rev1_620; __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \ - int32x2_t __rev2_620; __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 1, 0); \ - __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_lane_s32(__rev2_620, __p3_620)); \ - __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \ - __ret_620; \ +#define vqdmlsl_high_lane_s32(__p0_641, __p1_641, __p2_641, __p3_641) __extension__ ({ \ + int64x2_t __ret_641; \ + int64x2_t __s0_641 = __p0_641; \ + int32x4_t __s1_641 = __p1_641; \ + int32x2_t __s2_641 = __p2_641; \ + int64x2_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \ + int32x4_t __rev1_641; __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 3, 2, 1, 0); \ + int32x2_t __rev2_641; __rev2_641 = __builtin_shufflevector(__s2_641, __s2_641, 1, 0); \ + __ret_641 = __noswap_vqdmlsl_s32(__rev0_641, __noswap_vget_high_s32(__rev1_641), __noswap_splat_lane_s32(__rev2_641, __p3_641)); \ + __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 1, 0); \ + __ret_641; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \ - int32x4_t __ret_621; \ - int32x4_t __s0_621 = __p0_621; \ - int16x8_t __s1_621 = __p1_621; \ - int16x4_t __s2_621 = __p2_621; \ - __ret_621 = vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_lane_s16(__s2_621, __p3_621)); \ - __ret_621; \ +#define vqdmlsl_high_lane_s16(__p0_642, __p1_642, __p2_642, __p3_642) __extension__ ({ \ + int32x4_t __ret_642; \ + int32x4_t __s0_642 = __p0_642; \ + int16x8_t __s1_642 = __p1_642; \ + int16x4_t __s2_642 = __p2_642; \ + __ret_642 = vqdmlsl_s16(__s0_642, vget_high_s16(__s1_642), splat_lane_s16(__s2_642, __p3_642)); \ + __ret_642; \ }) #else -#define vqdmlsl_high_lane_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \ - int32x4_t __ret_622; \ - int32x4_t __s0_622 = __p0_622; \ - int16x8_t __s1_622 = 
__p1_622; \ - int16x4_t __s2_622 = __p2_622; \ - int32x4_t __rev0_622; __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \ - int16x8_t __rev1_622; __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_622; __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 3, 2, 1, 0); \ - __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_lane_s16(__rev2_622, __p3_622)); \ - __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \ - __ret_622; \ +#define vqdmlsl_high_lane_s16(__p0_643, __p1_643, __p2_643, __p3_643) __extension__ ({ \ + int32x4_t __ret_643; \ + int32x4_t __s0_643 = __p0_643; \ + int16x8_t __s1_643 = __p1_643; \ + int16x4_t __s2_643 = __p2_643; \ + int32x4_t __rev0_643; __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 3, 2, 1, 0); \ + int16x8_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_643; __rev2_643 = __builtin_shufflevector(__s2_643, __s2_643, 3, 2, 1, 0); \ + __ret_643 = __noswap_vqdmlsl_s16(__rev0_643, __noswap_vget_high_s16(__rev1_643), __noswap_splat_lane_s16(__rev2_643, __p3_643)); \ + __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 3, 2, 1, 0); \ + __ret_643; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \ - int64x2_t __ret_623; \ - int64x2_t __s0_623 = __p0_623; \ - int32x4_t __s1_623 = __p1_623; \ - int32x4_t __s2_623 = __p2_623; \ - __ret_623 = vqdmlsl_s32(__s0_623, vget_high_s32(__s1_623), splat_laneq_s32(__s2_623, __p3_623)); \ - __ret_623; \ +#define vqdmlsl_high_laneq_s32(__p0_644, __p1_644, __p2_644, __p3_644) __extension__ ({ \ + int64x2_t __ret_644; \ + int64x2_t __s0_644 = __p0_644; \ + int32x4_t __s1_644 = __p1_644; \ + int32x4_t __s2_644 = __p2_644; \ + __ret_644 = vqdmlsl_s32(__s0_644, vget_high_s32(__s1_644), splat_laneq_s32(__s2_644, __p3_644)); \ + __ret_644; \ }) #else -#define vqdmlsl_high_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \ - int64x2_t __ret_624; \ - int64x2_t __s0_624 = __p0_624; \ - int32x4_t __s1_624 = __p1_624; \ - int32x4_t __s2_624 = __p2_624; \ - int64x2_t __rev0_624; __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \ - int32x4_t __rev1_624; __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 3, 2, 1, 0); \ - int32x4_t __rev2_624; __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \ - __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __noswap_vget_high_s32(__rev1_624), __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \ - __ret_624 = __builtin_shufflevector(__ret_624, __ret_624, 1, 0); \ - __ret_624; \ +#define vqdmlsl_high_laneq_s32(__p0_645, __p1_645, __p2_645, __p3_645) __extension__ ({ \ + int64x2_t __ret_645; \ + int64x2_t __s0_645 = __p0_645; \ + int32x4_t __s1_645 = __p1_645; \ + int32x4_t __s2_645 = __p2_645; \ + int64x2_t __rev0_645; __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 1, 0); \ + int32x4_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \ + int32x4_t __rev2_645; __rev2_645 = __builtin_shufflevector(__s2_645, __s2_645, 3, 2, 1, 0); \ + __ret_645 = __noswap_vqdmlsl_s32(__rev0_645, __noswap_vget_high_s32(__rev1_645), __noswap_splat_laneq_s32(__rev2_645, __p3_645)); \ + __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 1, 0); \ + __ret_645; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s16(__p0_625, 
__p1_625, __p2_625, __p3_625) __extension__ ({ \ - int32x4_t __ret_625; \ - int32x4_t __s0_625 = __p0_625; \ - int16x8_t __s1_625 = __p1_625; \ - int16x8_t __s2_625 = __p2_625; \ - __ret_625 = vqdmlsl_s16(__s0_625, vget_high_s16(__s1_625), splat_laneq_s16(__s2_625, __p3_625)); \ - __ret_625; \ +#define vqdmlsl_high_laneq_s16(__p0_646, __p1_646, __p2_646, __p3_646) __extension__ ({ \ + int32x4_t __ret_646; \ + int32x4_t __s0_646 = __p0_646; \ + int16x8_t __s1_646 = __p1_646; \ + int16x8_t __s2_646 = __p2_646; \ + __ret_646 = vqdmlsl_s16(__s0_646, vget_high_s16(__s1_646), splat_laneq_s16(__s2_646, __p3_646)); \ + __ret_646; \ }) #else -#define vqdmlsl_high_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \ - int32x4_t __ret_626; \ - int32x4_t __s0_626 = __p0_626; \ - int16x8_t __s1_626 = __p1_626; \ - int16x8_t __s2_626 = __p2_626; \ - int32x4_t __rev0_626; __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \ - int16x8_t __rev1_626; __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_626; __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __noswap_vget_high_s16(__rev1_626), __noswap_splat_laneq_s16(__rev2_626, __p3_626)); \ - __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \ - __ret_626; \ +#define vqdmlsl_high_laneq_s16(__p0_647, __p1_647, __p2_647, __p3_647) __extension__ ({ \ + int32x4_t __ret_647; \ + int32x4_t __s0_647 = __p0_647; \ + int16x8_t __s1_647 = __p1_647; \ + int16x8_t __s2_647 = __p2_647; \ + int32x4_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \ + int16x8_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_647; __rev2_647 = __builtin_shufflevector(__s2_647, __s2_647, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_647 = __noswap_vqdmlsl_s16(__rev0_647, __noswap_vget_high_s16(__rev1_647), __noswap_splat_laneq_s16(__rev2_647, __p3_647)); \ + __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \ + __ret_647; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else -__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -52891,13 +54637,13 @@ __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else -__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -52992,59 +54738,59 @@ __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s32(__p0_627, __p1_627, __p2_627, __p3_627) __extension__ ({ \ - int64x2_t __ret_627; \ - int64x2_t __s0_627 = __p0_627; \ - int32x2_t __s1_627 = __p1_627; \ - int32x4_t __s2_627 = __p2_627; \ - __ret_627 = vqdmlsl_s32(__s0_627, __s1_627, splat_laneq_s32(__s2_627, __p3_627)); \ - __ret_627; \ +#define vqdmlsl_laneq_s32(__p0_648, __p1_648, __p2_648, __p3_648) __extension__ ({ \ + int64x2_t __ret_648; \ + int64x2_t __s0_648 = __p0_648; \ + int32x2_t __s1_648 = __p1_648; \ + int32x4_t __s2_648 = __p2_648; \ + __ret_648 = vqdmlsl_s32(__s0_648, __s1_648, splat_laneq_s32(__s2_648, __p3_648)); \ + __ret_648; \ }) #else -#define vqdmlsl_laneq_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ - int64x2_t __ret_628; \ - int64x2_t __s0_628 = __p0_628; \ - int32x2_t __s1_628 = __p1_628; \ - int32x4_t __s2_628 = __p2_628; \ - int64x2_t __rev0_628; __rev0_628 = __builtin_shufflevector(__s0_628, __s0_628, 1, 0); \ - int32x2_t __rev1_628; __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \ - int32x4_t __rev2_628; __rev2_628 = __builtin_shufflevector(__s2_628, __s2_628, 3, 2, 1, 0); \ - __ret_628 = __noswap_vqdmlsl_s32(__rev0_628, __rev1_628, __noswap_splat_laneq_s32(__rev2_628, __p3_628)); \ - __ret_628 = __builtin_shufflevector(__ret_628, __ret_628, 1, 0); \ - __ret_628; \ +#define vqdmlsl_laneq_s32(__p0_649, __p1_649, __p2_649, __p3_649) __extension__ ({ \ + int64x2_t __ret_649; \ + int64x2_t __s0_649 = __p0_649; \ + int32x2_t __s1_649 = __p1_649; \ + int32x4_t __s2_649 = __p2_649; \ + int64x2_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \ + int32x2_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 1, 0); \ + int32x4_t __rev2_649; __rev2_649 = __builtin_shufflevector(__s2_649, __s2_649, 3, 2, 1, 0); \ + __ret_649 = __noswap_vqdmlsl_s32(__rev0_649, __rev1_649, __noswap_splat_laneq_s32(__rev2_649, __p3_649)); \ + __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \ + __ret_649; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s16(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ - int32x4_t __ret_629; \ - int32x4_t __s0_629 = __p0_629; \ - int16x4_t __s1_629 = __p1_629; \ - int16x8_t __s2_629 = __p2_629; \ - __ret_629 = vqdmlsl_s16(__s0_629, __s1_629, splat_laneq_s16(__s2_629, __p3_629)); \ - __ret_629; \ +#define vqdmlsl_laneq_s16(__p0_650, __p1_650, __p2_650, __p3_650) __extension__ ({ \ + int32x4_t __ret_650; \ + int32x4_t __s0_650 = __p0_650; \ + int16x4_t __s1_650 = __p1_650; \ + int16x8_t __s2_650 = __p2_650; \ + __ret_650 = vqdmlsl_s16(__s0_650, __s1_650, splat_laneq_s16(__s2_650, __p3_650)); \ + __ret_650; \ }) #else -#define vqdmlsl_laneq_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ - int32x4_t __ret_630; \ - int32x4_t __s0_630 = __p0_630; \ - int16x4_t __s1_630 = __p1_630; \ - int16x8_t __s2_630 = __p2_630; \ - int32x4_t __rev0_630; __rev0_630 = __builtin_shufflevector(__s0_630, __s0_630, 3, 2, 1, 0); \ - int16x4_t __rev1_630; __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \ - int16x8_t __rev2_630; __rev2_630 = __builtin_shufflevector(__s2_630, __s2_630, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_630 = __noswap_vqdmlsl_s16(__rev0_630, __rev1_630, __noswap_splat_laneq_s16(__rev2_630, 
__p3_630)); \ - __ret_630 = __builtin_shufflevector(__ret_630, __ret_630, 3, 2, 1, 0); \ - __ret_630; \ +#define vqdmlsl_laneq_s16(__p0_651, __p1_651, __p2_651, __p3_651) __extension__ ({ \ + int32x4_t __ret_651; \ + int32x4_t __s0_651 = __p0_651; \ + int16x4_t __s1_651 = __p1_651; \ + int16x8_t __s2_651 = __p2_651; \ + int32x4_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \ + int16x4_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 3, 2, 1, 0); \ + int16x8_t __rev2_651; __rev2_651 = __builtin_shufflevector(__s2_651, __s2_651, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_651 = __noswap_vqdmlsl_s16(__rev0_651, __rev1_651, __noswap_splat_laneq_s16(__rev2_651, __p3_651)); \ + __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \ + __ret_651; \ }) #endif -__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); return __ret; } -__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); return __ret; @@ -53134,78 +54880,78 @@ __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_lane_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \ - int32_t __ret_631; \ - int32_t __s0_631 = __p0_631; \ - int32x2_t __s1_631 = __p1_631; \ - __ret_631 = vqdmulhs_s32(__s0_631, vget_lane_s32(__s1_631, __p2_631)); \ - __ret_631; \ +#define vqdmulhs_lane_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ + int32_t __ret_652; \ + int32_t __s0_652 = __p0_652; \ + int32x2_t __s1_652 = __p1_652; \ + __ret_652 = vqdmulhs_s32(__s0_652, vget_lane_s32(__s1_652, __p2_652)); \ + __ret_652; \ }) #else -#define vqdmulhs_lane_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \ - int32_t __ret_632; \ - int32_t __s0_632 = __p0_632; \ - int32x2_t __s1_632 = __p1_632; \ - int32x2_t __rev1_632; __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 1, 0); \ - __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vget_lane_s32(__rev1_632, __p2_632)); \ - __ret_632; \ +#define vqdmulhs_lane_s32(__p0_653, __p1_653, __p2_653) __extension__ ({ \ + int32_t __ret_653; \ + int32_t __s0_653 = __p0_653; \ + int32x2_t __s1_653 = __p1_653; \ + int32x2_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \ + __ret_653 = vqdmulhs_s32(__s0_653, __noswap_vget_lane_s32(__rev1_653, __p2_653)); \ + __ret_653; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_lane_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \ - int16_t __ret_633; \ - int16_t __s0_633 = __p0_633; \ - int16x4_t __s1_633 = __p1_633; \ - __ret_633 = vqdmulhh_s16(__s0_633, vget_lane_s16(__s1_633, __p2_633)); \ - __ret_633; \ +#define vqdmulhh_lane_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ + int16_t __ret_654; \ + int16_t __s0_654 = __p0_654; \ + int16x4_t __s1_654 = __p1_654; \ + __ret_654 = vqdmulhh_s16(__s0_654, vget_lane_s16(__s1_654, __p2_654)); \ + __ret_654; \ }) #else -#define vqdmulhh_lane_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \ - int16_t __ret_634; \ - int16_t __s0_634 = __p0_634; \ - int16x4_t __s1_634 = __p1_634; \ - int16x4_t __rev1_634; __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 3, 2, 1, 0); \ - __ret_634 = vqdmulhh_s16(__s0_634, __noswap_vget_lane_s16(__rev1_634, 
__p2_634)); \ - __ret_634; \ +#define vqdmulhh_lane_s16(__p0_655, __p1_655, __p2_655) __extension__ ({ \ + int16_t __ret_655; \ + int16_t __s0_655 = __p0_655; \ + int16x4_t __s1_655 = __p1_655; \ + int16x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \ + __ret_655 = vqdmulhh_s16(__s0_655, __noswap_vget_lane_s16(__rev1_655, __p2_655)); \ + __ret_655; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_laneq_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \ - int32_t __ret_635; \ - int32_t __s0_635 = __p0_635; \ - int32x4_t __s1_635 = __p1_635; \ - __ret_635 = vqdmulhs_s32(__s0_635, vgetq_lane_s32(__s1_635, __p2_635)); \ - __ret_635; \ +#define vqdmulhs_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ + int32_t __ret_656; \ + int32_t __s0_656 = __p0_656; \ + int32x4_t __s1_656 = __p1_656; \ + __ret_656 = vqdmulhs_s32(__s0_656, vgetq_lane_s32(__s1_656, __p2_656)); \ + __ret_656; \ }) #else -#define vqdmulhs_laneq_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \ - int32_t __ret_636; \ - int32_t __s0_636 = __p0_636; \ - int32x4_t __s1_636 = __p1_636; \ - int32x4_t __rev1_636; __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 3, 2, 1, 0); \ - __ret_636 = vqdmulhs_s32(__s0_636, __noswap_vgetq_lane_s32(__rev1_636, __p2_636)); \ - __ret_636; \ +#define vqdmulhs_laneq_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \ + int32_t __ret_657; \ + int32_t __s0_657 = __p0_657; \ + int32x4_t __s1_657 = __p1_657; \ + int32x4_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 3, 2, 1, 0); \ + __ret_657 = vqdmulhs_s32(__s0_657, __noswap_vgetq_lane_s32(__rev1_657, __p2_657)); \ + __ret_657; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_laneq_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \ - int16_t __ret_637; \ - int16_t __s0_637 = __p0_637; \ - int16x8_t __s1_637 = __p1_637; \ - __ret_637 = vqdmulhh_s16(__s0_637, vgetq_lane_s16(__s1_637, __p2_637)); \ - __ret_637; \ +#define vqdmulhh_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ + int16_t __ret_658; \ + int16_t __s0_658 = __p0_658; \ + int16x8_t __s1_658 = __p1_658; \ + __ret_658 = vqdmulhh_s16(__s0_658, vgetq_lane_s16(__s1_658, __p2_658)); \ + __ret_658; \ }) #else -#define vqdmulhh_laneq_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \ - int16_t __ret_638; \ - int16_t __s0_638 = __p0_638; \ - int16x8_t __s1_638 = __p1_638; \ - int16x8_t __rev1_638; __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_638 = vqdmulhh_s16(__s0_638, __noswap_vgetq_lane_s16(__rev1_638, __p2_638)); \ - __ret_638; \ +#define vqdmulhh_laneq_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \ + int16_t __ret_659; \ + int16_t __s0_659 = __p0_659; \ + int16x8_t __s1_659 = __p1_659; \ + int16x8_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_659 = vqdmulhh_s16(__s0_659, __noswap_vgetq_lane_s16(__rev1_659, __p2_659)); \ + __ret_659; \ }) #endif @@ -53293,24 +55039,24 @@ __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { }) #endif -__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); return __ret; } -__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { int32_t __ret; __ret = (int32_t) 
__builtin_neon_vqdmullh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else -__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -53321,13 +55067,13 @@ __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else -__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -53338,97 +55084,97 @@ __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ - int64x2_t __ret_639; \ - int32x4_t __s0_639 = __p0_639; \ - int32x2_t __s1_639 = __p1_639; \ - __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_lane_s32(__s1_639, __p2_639)); \ - __ret_639; \ +#define vqdmull_high_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ + int64x2_t __ret_660; \ + int32x4_t __s0_660 = __p0_660; \ + int32x2_t __s1_660 = __p1_660; \ + __ret_660 = vqdmull_s32(vget_high_s32(__s0_660), splat_lane_s32(__s1_660, __p2_660)); \ + __ret_660; \ }) #else -#define vqdmull_high_lane_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \ - int64x2_t __ret_640; \ - int32x4_t __s0_640 = __p0_640; \ - int32x2_t __s1_640 = __p1_640; \ - int32x4_t __rev0_640; __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \ - int32x2_t __rev1_640; __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 1, 0); \ - __ret_640 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_lane_s32(__rev1_640, __p2_640)); \ - __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \ - __ret_640; \ +#define vqdmull_high_lane_s32(__p0_661, __p1_661, __p2_661) __extension__ ({ \ + int64x2_t __ret_661; \ + int32x4_t __s0_661 = __p0_661; \ + int32x2_t __s1_661 = __p1_661; \ + int32x4_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \ + int32x2_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 1, 0); \ + __ret_661 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_661), __noswap_splat_lane_s32(__rev1_661, __p2_661)); \ + __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \ + __ret_661; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \ - int32x4_t __ret_641; \ - int16x8_t __s0_641 = __p0_641; \ - int16x4_t __s1_641 = __p1_641; \ - __ret_641 = vqdmull_s16(vget_high_s16(__s0_641), splat_lane_s16(__s1_641, 
__p2_641)); \ - __ret_641; \ +#define vqdmull_high_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ + int32x4_t __ret_662; \ + int16x8_t __s0_662 = __p0_662; \ + int16x4_t __s1_662 = __p1_662; \ + __ret_662 = vqdmull_s16(vget_high_s16(__s0_662), splat_lane_s16(__s1_662, __p2_662)); \ + __ret_662; \ }) #else -#define vqdmull_high_lane_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \ - int32x4_t __ret_642; \ - int16x8_t __s0_642 = __p0_642; \ - int16x4_t __s1_642 = __p1_642; \ - int16x8_t __rev0_642; __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_642; __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 3, 2, 1, 0); \ - __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_lane_s16(__rev1_642, __p2_642)); \ - __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \ - __ret_642; \ +#define vqdmull_high_lane_s16(__p0_663, __p1_663, __p2_663) __extension__ ({ \ + int32x4_t __ret_663; \ + int16x8_t __s0_663 = __p0_663; \ + int16x4_t __s1_663 = __p1_663; \ + int16x8_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \ + __ret_663 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_663), __noswap_splat_lane_s16(__rev1_663, __p2_663)); \ + __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \ + __ret_663; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \ - int64x2_t __ret_643; \ - int32x4_t __s0_643 = __p0_643; \ - int32x4_t __s1_643 = __p1_643; \ - __ret_643 = vqdmull_s32(vget_high_s32(__s0_643), splat_laneq_s32(__s1_643, __p2_643)); \ - __ret_643; \ +#define vqdmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ + int64x2_t __ret_664; \ + int32x4_t __s0_664 = __p0_664; \ + int32x4_t __s1_664 = __p1_664; \ + __ret_664 = vqdmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \ + __ret_664; \ }) #else -#define vqdmull_high_laneq_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \ - int64x2_t __ret_644; \ - int32x4_t __s0_644 = __p0_644; \ - int32x4_t __s1_644 = __p1_644; \ - int32x4_t __rev0_644; __rev0_644 = __builtin_shufflevector(__s0_644, __s0_644, 3, 2, 1, 0); \ - int32x4_t __rev1_644; __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 3, 2, 1, 0); \ - __ret_644 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_644), __noswap_splat_laneq_s32(__rev1_644, __p2_644)); \ - __ret_644 = __builtin_shufflevector(__ret_644, __ret_644, 1, 0); \ - __ret_644; \ +#define vqdmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \ + int64x2_t __ret_665; \ + int32x4_t __s0_665 = __p0_665; \ + int32x4_t __s1_665 = __p1_665; \ + int32x4_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \ + int32x4_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \ + __ret_665 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \ + __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \ + __ret_665; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \ - int32x4_t __ret_645; \ - int16x8_t __s0_645 = __p0_645; \ - int16x8_t __s1_645 = __p1_645; \ - __ret_645 = vqdmull_s16(vget_high_s16(__s0_645), splat_laneq_s16(__s1_645, __p2_645)); 
\ - __ret_645; \ +#define vqdmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ + int32x4_t __ret_666; \ + int16x8_t __s0_666 = __p0_666; \ + int16x8_t __s1_666 = __p1_666; \ + __ret_666 = vqdmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \ + __ret_666; \ }) #else -#define vqdmull_high_laneq_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \ - int32x4_t __ret_646; \ - int16x8_t __s0_646 = __p0_646; \ - int16x8_t __s1_646 = __p1_646; \ - int16x8_t __rev0_646; __rev0_646 = __builtin_shufflevector(__s0_646, __s0_646, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_646; __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_646 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_646), __noswap_splat_laneq_s16(__rev1_646, __p2_646)); \ - __ret_646 = __builtin_shufflevector(__ret_646, __ret_646, 3, 2, 1, 0); \ - __ret_646; \ +#define vqdmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \ + int32x4_t __ret_667; \ + int16x8_t __s0_667 = __p0_667; \ + int16x8_t __s1_667 = __p1_667; \ + int16x8_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_667; __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_667 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \ + __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \ + __ret_667; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); return __ret; } #else -__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); @@ -53438,13 +55184,13 @@ __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); return __ret; } #else -__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); @@ -53454,161 +55200,161 @@ __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulls_lane_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ - int64_t __ret_647; \ - int32_t __s0_647 = __p0_647; \ - int32x2_t __s1_647 = __p1_647; \ - __ret_647 = vqdmulls_s32(__s0_647, vget_lane_s32(__s1_647, __p2_647)); \ - __ret_647; \ +#define vqdmulls_lane_s32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ + int64_t __ret_668; \ + int32_t __s0_668 = __p0_668; \ + int32x2_t __s1_668 = __p1_668; \ + __ret_668 = vqdmulls_s32(__s0_668, vget_lane_s32(__s1_668, __p2_668)); \ + __ret_668; \ }) #else -#define vqdmulls_lane_s32(__p0_648, __p1_648, 
__p2_648) __extension__ ({ \ - int64_t __ret_648; \ - int32_t __s0_648 = __p0_648; \ - int32x2_t __s1_648 = __p1_648; \ - int32x2_t __rev1_648; __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 1, 0); \ - __ret_648 = vqdmulls_s32(__s0_648, __noswap_vget_lane_s32(__rev1_648, __p2_648)); \ - __ret_648; \ +#define vqdmulls_lane_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \ + int64_t __ret_669; \ + int32_t __s0_669 = __p0_669; \ + int32x2_t __s1_669 = __p1_669; \ + int32x2_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \ + __ret_669 = vqdmulls_s32(__s0_669, __noswap_vget_lane_s32(__rev1_669, __p2_669)); \ + __ret_669; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmullh_lane_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \ - int32_t __ret_649; \ - int16_t __s0_649 = __p0_649; \ - int16x4_t __s1_649 = __p1_649; \ - __ret_649 = vqdmullh_s16(__s0_649, vget_lane_s16(__s1_649, __p2_649)); \ - __ret_649; \ +#define vqdmullh_lane_s16(__p0_670, __p1_670, __p2_670) __extension__ ({ \ + int32_t __ret_670; \ + int16_t __s0_670 = __p0_670; \ + int16x4_t __s1_670 = __p1_670; \ + __ret_670 = vqdmullh_s16(__s0_670, vget_lane_s16(__s1_670, __p2_670)); \ + __ret_670; \ }) #else -#define vqdmullh_lane_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \ - int32_t __ret_650; \ - int16_t __s0_650 = __p0_650; \ - int16x4_t __s1_650 = __p1_650; \ - int16x4_t __rev1_650; __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 3, 2, 1, 0); \ - __ret_650 = vqdmullh_s16(__s0_650, __noswap_vget_lane_s16(__rev1_650, __p2_650)); \ - __ret_650; \ +#define vqdmullh_lane_s16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ + int32_t __ret_671; \ + int16_t __s0_671 = __p0_671; \ + int16x4_t __s1_671 = __p1_671; \ + int16x4_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \ + __ret_671 = vqdmullh_s16(__s0_671, __noswap_vget_lane_s16(__rev1_671, __p2_671)); \ + __ret_671; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulls_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \ - int64_t __ret_651; \ - int32_t __s0_651 = __p0_651; \ - int32x4_t __s1_651 = __p1_651; \ - __ret_651 = vqdmulls_s32(__s0_651, vgetq_lane_s32(__s1_651, __p2_651)); \ - __ret_651; \ +#define vqdmulls_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \ + int64_t __ret_672; \ + int32_t __s0_672 = __p0_672; \ + int32x4_t __s1_672 = __p1_672; \ + __ret_672 = vqdmulls_s32(__s0_672, vgetq_lane_s32(__s1_672, __p2_672)); \ + __ret_672; \ }) #else -#define vqdmulls_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ - int64_t __ret_652; \ - int32_t __s0_652 = __p0_652; \ - int32x4_t __s1_652 = __p1_652; \ - int32x4_t __rev1_652; __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \ - __ret_652 = vqdmulls_s32(__s0_652, __noswap_vgetq_lane_s32(__rev1_652, __p2_652)); \ - __ret_652; \ +#define vqdmulls_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ + int64_t __ret_673; \ + int32_t __s0_673 = __p0_673; \ + int32x4_t __s1_673 = __p1_673; \ + int32x4_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \ + __ret_673 = vqdmulls_s32(__s0_673, __noswap_vgetq_lane_s32(__rev1_673, __p2_673)); \ + __ret_673; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmullh_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \ - int32_t __ret_653; \ - int16_t __s0_653 = __p0_653; \ - int16x8_t __s1_653 = __p1_653; \ - __ret_653 = vqdmullh_s16(__s0_653, vgetq_lane_s16(__s1_653, __p2_653)); \ - __ret_653; \ +#define 
vqdmullh_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \ + int32_t __ret_674; \ + int16_t __s0_674 = __p0_674; \ + int16x8_t __s1_674 = __p1_674; \ + __ret_674 = vqdmullh_s16(__s0_674, vgetq_lane_s16(__s1_674, __p2_674)); \ + __ret_674; \ }) #else -#define vqdmullh_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ - int32_t __ret_654; \ - int16_t __s0_654 = __p0_654; \ - int16x8_t __s1_654 = __p1_654; \ - int16x8_t __rev1_654; __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_654 = vqdmullh_s16(__s0_654, __noswap_vgetq_lane_s16(__rev1_654, __p2_654)); \ - __ret_654; \ +#define vqdmullh_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \ + int32_t __ret_675; \ + int16_t __s0_675 = __p0_675; \ + int16x8_t __s1_675 = __p1_675; \ + int16x8_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_675 = vqdmullh_s16(__s0_675, __noswap_vgetq_lane_s16(__rev1_675, __p2_675)); \ + __ret_675; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \ - int64x2_t __ret_655; \ - int32x2_t __s0_655 = __p0_655; \ - int32x4_t __s1_655 = __p1_655; \ - __ret_655 = vqdmull_s32(__s0_655, splat_laneq_s32(__s1_655, __p2_655)); \ - __ret_655; \ +#define vqdmull_laneq_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \ + int64x2_t __ret_676; \ + int32x2_t __s0_676 = __p0_676; \ + int32x4_t __s1_676 = __p1_676; \ + __ret_676 = vqdmull_s32(__s0_676, splat_laneq_s32(__s1_676, __p2_676)); \ + __ret_676; \ }) #else -#define vqdmull_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ - int64x2_t __ret_656; \ - int32x2_t __s0_656 = __p0_656; \ - int32x4_t __s1_656 = __p1_656; \ - int32x2_t __rev0_656; __rev0_656 = __builtin_shufflevector(__s0_656, __s0_656, 1, 0); \ - int32x4_t __rev1_656; __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 3, 2, 1, 0); \ - __ret_656 = __noswap_vqdmull_s32(__rev0_656, __noswap_splat_laneq_s32(__rev1_656, __p2_656)); \ - __ret_656 = __builtin_shufflevector(__ret_656, __ret_656, 1, 0); \ - __ret_656; \ +#define vqdmull_laneq_s32(__p0_677, __p1_677, __p2_677) __extension__ ({ \ + int64x2_t __ret_677; \ + int32x2_t __s0_677 = __p0_677; \ + int32x4_t __s1_677 = __p1_677; \ + int32x2_t __rev0_677; __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 1, 0); \ + int32x4_t __rev1_677; __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \ + __ret_677 = __noswap_vqdmull_s32(__rev0_677, __noswap_splat_laneq_s32(__rev1_677, __p2_677)); \ + __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \ + __ret_677; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \ - int32x4_t __ret_657; \ - int16x4_t __s0_657 = __p0_657; \ - int16x8_t __s1_657 = __p1_657; \ - __ret_657 = vqdmull_s16(__s0_657, splat_laneq_s16(__s1_657, __p2_657)); \ - __ret_657; \ +#define vqdmull_laneq_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ + int32x4_t __ret_678; \ + int16x4_t __s0_678 = __p0_678; \ + int16x8_t __s1_678 = __p1_678; \ + __ret_678 = vqdmull_s16(__s0_678, splat_laneq_s16(__s1_678, __p2_678)); \ + __ret_678; \ }) #else -#define vqdmull_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ - int32x4_t __ret_658; \ - int16x4_t __s0_658 = __p0_658; \ - int16x8_t __s1_658 = __p1_658; \ - int16x4_t __rev0_658; __rev0_658 = __builtin_shufflevector(__s0_658, __s0_658, 3, 2, 1, 0); \ - int16x8_t __rev1_658; __rev1_658 = 
__builtin_shufflevector(__s1_658, __s1_658, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_658 = __noswap_vqdmull_s16(__rev0_658, __noswap_splat_laneq_s16(__rev1_658, __p2_658)); \ - __ret_658 = __builtin_shufflevector(__ret_658, __ret_658, 3, 2, 1, 0); \ - __ret_658; \ +#define vqdmull_laneq_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \ + int32x4_t __ret_679; \ + int16x4_t __s0_679 = __p0_679; \ + int16x8_t __s1_679 = __p1_679; \ + int16x4_t __rev0_679; __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 3, 2, 1, 0); \ + int16x8_t __rev1_679; __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_679 = __noswap_vqdmull_s16(__rev0_679, __noswap_splat_laneq_s16(__rev1_679, __p2_679)); \ + __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \ + __ret_679; \ }) #endif -__ai int16_t vqmovns_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int16_t vqmovns_s32(int32_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); return __ret; } -__ai int32_t vqmovnd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int32_t vqmovnd_s64(int64_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); return __ret; } -__ai int8_t vqmovnh_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int8_t vqmovnh_s16(int16_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); return __ret; } -__ai uint16_t vqmovns_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vqmovns_u32(uint32_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); return __ret; } -__ai uint32_t vqmovnd_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vqmovnd_u64(uint64_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); return __ret; } -__ai uint8_t vqmovnh_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vqmovnh_u16(uint16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); return __ret; } #else -__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -53619,13 +55365,13 @@ __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); return __ret; } #else -__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -53636,13 +55382,13 @@ __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, 
uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); return __ret; } #else -__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -53653,13 +55399,13 @@ __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); return __ret; } #else -__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -53670,13 +55416,13 @@ __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); return __ret; } #else -__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -53687,13 +55433,13 @@ __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); return __ret; } #else -__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -53703,29 +55449,29 @@ __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { } #endif -__ai uint16_t vqmovuns_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) uint16_t vqmovuns_s32(int32_t __p0) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); return __ret; } -__ai uint32_t vqmovund_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) uint32_t vqmovund_s64(int64_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); return __ret; } -__ai uint8_t vqmovunh_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) uint8_t vqmovunh_s16(int16_t __p0) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t 
vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { uint16x8_t __ret; __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); return __ret; } #else -__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -53736,13 +55482,13 @@ __ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { uint32x4_t __ret; __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); return __ret; } #else -__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -53753,13 +55499,13 @@ __ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { uint8x16_t __ret; __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); return __ret; } #else -__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -53770,13 +55516,13 @@ __ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqnegq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vqnegq_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); @@ -53785,37 +55531,37 @@ __ai int64x2_t vqnegq_s64(int64x2_t __p0) { } #endif -__ai int64x1_t vqneg_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vqneg_s64(int64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); return __ret; } -__ai int8_t vqnegb_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8_t vqnegb_s8(int8_t __p0) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); return __ret; } -__ai int32_t vqnegs_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32_t vqnegs_s32(int32_t __p0) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); return __ret; } -__ai int64_t vqnegd_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64_t vqnegd_s64(int64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); return __ret; } -__ai int16_t vqnegh_s16(int16_t __p0) { +__ai 
__attribute__((target("neon"))) int16_t vqnegh_s16(int16_t __p0) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); return __ret; } -__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); return __ret; } -__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); return __ret; @@ -53905,78 +55651,78 @@ __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_lane_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \ - int32_t __ret_659; \ - int32_t __s0_659 = __p0_659; \ - int32x2_t __s1_659 = __p1_659; \ - __ret_659 = vqrdmulhs_s32(__s0_659, vget_lane_s32(__s1_659, __p2_659)); \ - __ret_659; \ +#define vqrdmulhs_lane_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ + int32_t __ret_680; \ + int32_t __s0_680 = __p0_680; \ + int32x2_t __s1_680 = __p1_680; \ + __ret_680 = vqrdmulhs_s32(__s0_680, vget_lane_s32(__s1_680, __p2_680)); \ + __ret_680; \ }) #else -#define vqrdmulhs_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ - int32_t __ret_660; \ - int32_t __s0_660 = __p0_660; \ - int32x2_t __s1_660 = __p1_660; \ - int32x2_t __rev1_660; __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 1, 0); \ - __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vget_lane_s32(__rev1_660, __p2_660)); \ - __ret_660; \ +#define vqrdmulhs_lane_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \ + int32_t __ret_681; \ + int32_t __s0_681 = __p0_681; \ + int32x2_t __s1_681 = __p1_681; \ + int32x2_t __rev1_681; __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 1, 0); \ + __ret_681 = vqrdmulhs_s32(__s0_681, __noswap_vget_lane_s32(__rev1_681, __p2_681)); \ + __ret_681; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_lane_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \ - int16_t __ret_661; \ - int16_t __s0_661 = __p0_661; \ - int16x4_t __s1_661 = __p1_661; \ - __ret_661 = vqrdmulhh_s16(__s0_661, vget_lane_s16(__s1_661, __p2_661)); \ - __ret_661; \ +#define vqrdmulhh_lane_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \ + int16_t __ret_682; \ + int16_t __s0_682 = __p0_682; \ + int16x4_t __s1_682 = __p1_682; \ + __ret_682 = vqrdmulhh_s16(__s0_682, vget_lane_s16(__s1_682, __p2_682)); \ + __ret_682; \ }) #else -#define vqrdmulhh_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ - int16_t __ret_662; \ - int16_t __s0_662 = __p0_662; \ - int16x4_t __s1_662 = __p1_662; \ - int16x4_t __rev1_662; __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 3, 2, 1, 0); \ - __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vget_lane_s16(__rev1_662, __p2_662)); \ - __ret_662; \ +#define vqrdmulhh_lane_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ + int16_t __ret_683; \ + int16_t __s0_683 = __p0_683; \ + int16x4_t __s1_683 = __p1_683; \ + int16x4_t __rev1_683; __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 3, 2, 1, 0); \ + __ret_683 = vqrdmulhh_s16(__s0_683, __noswap_vget_lane_s16(__rev1_683, __p2_683)); \ + __ret_683; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_laneq_s32(__p0_663, __p1_663, __p2_663) __extension__ ({ \ - int32_t __ret_663; \ - int32_t __s0_663 = __p0_663; \ - int32x4_t __s1_663 = __p1_663; \ - __ret_663 = vqrdmulhs_s32(__s0_663, vgetq_lane_s32(__s1_663, 
__p2_663)); \ - __ret_663; \ +#define vqrdmulhs_laneq_s32(__p0_684, __p1_684, __p2_684) __extension__ ({ \ + int32_t __ret_684; \ + int32_t __s0_684 = __p0_684; \ + int32x4_t __s1_684 = __p1_684; \ + __ret_684 = vqrdmulhs_s32(__s0_684, vgetq_lane_s32(__s1_684, __p2_684)); \ + __ret_684; \ }) #else -#define vqrdmulhs_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ - int32_t __ret_664; \ - int32_t __s0_664 = __p0_664; \ - int32x4_t __s1_664 = __p1_664; \ - int32x4_t __rev1_664; __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \ - __ret_664 = vqrdmulhs_s32(__s0_664, __noswap_vgetq_lane_s32(__rev1_664, __p2_664)); \ - __ret_664; \ +#define vqrdmulhs_laneq_s32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ + int32_t __ret_685; \ + int32_t __s0_685 = __p0_685; \ + int32x4_t __s1_685 = __p1_685; \ + int32x4_t __rev1_685; __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \ + __ret_685 = vqrdmulhs_s32(__s0_685, __noswap_vgetq_lane_s32(__rev1_685, __p2_685)); \ + __ret_685; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_laneq_s16(__p0_665, __p1_665, __p2_665) __extension__ ({ \ - int16_t __ret_665; \ - int16_t __s0_665 = __p0_665; \ - int16x8_t __s1_665 = __p1_665; \ - __ret_665 = vqrdmulhh_s16(__s0_665, vgetq_lane_s16(__s1_665, __p2_665)); \ - __ret_665; \ +#define vqrdmulhh_laneq_s16(__p0_686, __p1_686, __p2_686) __extension__ ({ \ + int16_t __ret_686; \ + int16_t __s0_686 = __p0_686; \ + int16x8_t __s1_686 = __p1_686; \ + __ret_686 = vqrdmulhh_s16(__s0_686, vgetq_lane_s16(__s1_686, __p2_686)); \ + __ret_686; \ }) #else -#define vqrdmulhh_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ - int16_t __ret_666; \ - int16_t __s0_666 = __p0_666; \ - int16x8_t __s1_666 = __p1_666; \ - int16x8_t __rev1_666; __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_666 = vqrdmulhh_s16(__s0_666, __noswap_vgetq_lane_s16(__rev1_666, __p2_666)); \ - __ret_666; \ +#define vqrdmulhh_laneq_s16(__p0_687, __p1_687, __p2_687) __extension__ ({ \ + int16_t __ret_687; \ + int16_t __s0_687 = __p0_687; \ + int16x8_t __s1_687 = __p1_687; \ + int16x8_t __rev1_687; __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_687 = vqrdmulhh_s16(__s0_687, __noswap_vgetq_lane_s16(__rev1_687, __p2_687)); \ + __ret_687; \ }) #endif @@ -54064,169 +55810,169 @@ __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { }) #endif -__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); return __ret; } -__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); return __ret; } -__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); return __ret; } -__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); return __ret; } -__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { int8_t 
__ret; __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); return __ret; } -__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); return __ret; } -__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); return __ret; } -__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u32(__p0_667, __p1_667, __p2_667) __extension__ ({ \ - uint16x8_t __ret_667; \ - uint16x4_t __s0_667 = __p0_667; \ - uint32x4_t __s1_667 = __p1_667; \ - __ret_667 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_667), (uint16x4_t)(vqrshrn_n_u32(__s1_667, __p2_667)))); \ - __ret_667; \ +#define vqrshrn_high_n_u32(__p0_688, __p1_688, __p2_688) __extension__ ({ \ + uint16x8_t __ret_688; \ + uint16x4_t __s0_688 = __p0_688; \ + uint32x4_t __s1_688 = __p1_688; \ + __ret_688 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_688), (uint16x4_t)(vqrshrn_n_u32(__s1_688, __p2_688)))); \ + __ret_688; \ }) #else -#define vqrshrn_high_n_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ - uint16x8_t __ret_668; \ - uint16x4_t __s0_668 = __p0_668; \ - uint32x4_t __s1_668 = __p1_668; \ - uint16x4_t __rev0_668; __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 3, 2, 1, 0); \ - uint32x4_t __rev1_668; __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 3, 2, 1, 0); \ - __ret_668 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_668), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_668, __p2_668)))); \ - __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_668; \ +#define vqrshrn_high_n_u32(__p0_689, __p1_689, __p2_689) __extension__ ({ \ + uint16x8_t __ret_689; \ + uint16x4_t __s0_689 = __p0_689; \ + uint32x4_t __s1_689 = __p1_689; \ + uint16x4_t __rev0_689; __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 3, 2, 1, 0); \ + uint32x4_t __rev1_689; __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \ + __ret_689 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_689), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_689, __p2_689)))); \ + __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_689; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u64(__p0_669, __p1_669, __p2_669) __extension__ ({ \ - uint32x4_t __ret_669; \ - uint32x2_t __s0_669 = __p0_669; \ - uint64x2_t __s1_669 = __p1_669; \ - __ret_669 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_669), (uint32x2_t)(vqrshrn_n_u64(__s1_669, __p2_669)))); \ - __ret_669; \ +#define vqrshrn_high_n_u64(__p0_690, __p1_690, __p2_690) __extension__ ({ \ + uint32x4_t __ret_690; \ + uint32x2_t __s0_690 = __p0_690; \ + uint64x2_t __s1_690 = __p1_690; \ + __ret_690 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_690), (uint32x2_t)(vqrshrn_n_u64(__s1_690, __p2_690)))); \ + __ret_690; \ }) #else -#define vqrshrn_high_n_u64(__p0_670, __p1_670, __p2_670) __extension__ ({ \ - uint32x4_t __ret_670; \ - uint32x2_t __s0_670 = __p0_670; \ - uint64x2_t __s1_670 = __p1_670; \ - uint32x2_t __rev0_670; __rev0_670 = __builtin_shufflevector(__s0_670, 
__s0_670, 1, 0); \ - uint64x2_t __rev1_670; __rev1_670 = __builtin_shufflevector(__s1_670, __s1_670, 1, 0); \ - __ret_670 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_670), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_670, __p2_670)))); \ - __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 3, 2, 1, 0); \ - __ret_670; \ +#define vqrshrn_high_n_u64(__p0_691, __p1_691, __p2_691) __extension__ ({ \ + uint32x4_t __ret_691; \ + uint32x2_t __s0_691 = __p0_691; \ + uint64x2_t __s1_691 = __p1_691; \ + uint32x2_t __rev0_691; __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 1, 0); \ + uint64x2_t __rev1_691; __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 1, 0); \ + __ret_691 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_691), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_691, __p2_691)))); \ + __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \ + __ret_691; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ - uint8x16_t __ret_671; \ - uint8x8_t __s0_671 = __p0_671; \ - uint16x8_t __s1_671 = __p1_671; \ - __ret_671 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_671), (uint8x8_t)(vqrshrn_n_u16(__s1_671, __p2_671)))); \ - __ret_671; \ +#define vqrshrn_high_n_u16(__p0_692, __p1_692, __p2_692) __extension__ ({ \ + uint8x16_t __ret_692; \ + uint8x8_t __s0_692 = __p0_692; \ + uint16x8_t __s1_692 = __p1_692; \ + __ret_692 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_692), (uint8x8_t)(vqrshrn_n_u16(__s1_692, __p2_692)))); \ + __ret_692; \ }) #else -#define vqrshrn_high_n_u16(__p0_672, __p1_672, __p2_672) __extension__ ({ \ - uint8x16_t __ret_672; \ - uint8x8_t __s0_672 = __p0_672; \ - uint16x8_t __s1_672 = __p1_672; \ - uint8x8_t __rev0_672; __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_672; __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_672 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_672), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_672, __p2_672)))); \ - __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_672; \ +#define vqrshrn_high_n_u16(__p0_693, __p1_693, __p2_693) __extension__ ({ \ + uint8x16_t __ret_693; \ + uint8x8_t __s0_693 = __p0_693; \ + uint16x8_t __s1_693 = __p1_693; \ + uint8x8_t __rev0_693; __rev0_693 = __builtin_shufflevector(__s0_693, __s0_693, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_693; __rev1_693 = __builtin_shufflevector(__s1_693, __s1_693, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_693 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_693), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_693, __p2_693)))); \ + __ret_693 = __builtin_shufflevector(__ret_693, __ret_693, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_693; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ - int16x8_t __ret_673; \ - int16x4_t __s0_673 = __p0_673; \ - int32x4_t __s1_673 = __p1_673; \ - __ret_673 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_673), (int16x4_t)(vqrshrn_n_s32(__s1_673, __p2_673)))); \ - __ret_673; \ +#define vqrshrn_high_n_s32(__p0_694, __p1_694, __p2_694) __extension__ ({ \ + int16x8_t __ret_694; \ + int16x4_t __s0_694 = __p0_694; \ + int32x4_t __s1_694 = __p1_694; \ + __ret_694 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_694), (int16x4_t)(vqrshrn_n_s32(__s1_694, __p2_694)))); \ + __ret_694; \ }) #else -#define 
vqrshrn_high_n_s32(__p0_674, __p1_674, __p2_674) __extension__ ({ \ - int16x8_t __ret_674; \ - int16x4_t __s0_674 = __p0_674; \ - int32x4_t __s1_674 = __p1_674; \ - int16x4_t __rev0_674; __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 3, 2, 1, 0); \ - int32x4_t __rev1_674; __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 3, 2, 1, 0); \ - __ret_674 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_674), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_674, __p2_674)))); \ - __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_674; \ +#define vqrshrn_high_n_s32(__p0_695, __p1_695, __p2_695) __extension__ ({ \ + int16x8_t __ret_695; \ + int16x4_t __s0_695 = __p0_695; \ + int32x4_t __s1_695 = __p1_695; \ + int16x4_t __rev0_695; __rev0_695 = __builtin_shufflevector(__s0_695, __s0_695, 3, 2, 1, 0); \ + int32x4_t __rev1_695; __rev1_695 = __builtin_shufflevector(__s1_695, __s1_695, 3, 2, 1, 0); \ + __ret_695 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_695), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_695, __p2_695)))); \ + __ret_695 = __builtin_shufflevector(__ret_695, __ret_695, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_695; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s64(__p0_675, __p1_675, __p2_675) __extension__ ({ \ - int32x4_t __ret_675; \ - int32x2_t __s0_675 = __p0_675; \ - int64x2_t __s1_675 = __p1_675; \ - __ret_675 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_675), (int32x2_t)(vqrshrn_n_s64(__s1_675, __p2_675)))); \ - __ret_675; \ +#define vqrshrn_high_n_s64(__p0_696, __p1_696, __p2_696) __extension__ ({ \ + int32x4_t __ret_696; \ + int32x2_t __s0_696 = __p0_696; \ + int64x2_t __s1_696 = __p1_696; \ + __ret_696 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_696), (int32x2_t)(vqrshrn_n_s64(__s1_696, __p2_696)))); \ + __ret_696; \ }) #else -#define vqrshrn_high_n_s64(__p0_676, __p1_676, __p2_676) __extension__ ({ \ - int32x4_t __ret_676; \ - int32x2_t __s0_676 = __p0_676; \ - int64x2_t __s1_676 = __p1_676; \ - int32x2_t __rev0_676; __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 1, 0); \ - int64x2_t __rev1_676; __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 1, 0); \ - __ret_676 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_676), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_676, __p2_676)))); \ - __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 3, 2, 1, 0); \ - __ret_676; \ +#define vqrshrn_high_n_s64(__p0_697, __p1_697, __p2_697) __extension__ ({ \ + int32x4_t __ret_697; \ + int32x2_t __s0_697 = __p0_697; \ + int64x2_t __s1_697 = __p1_697; \ + int32x2_t __rev0_697; __rev0_697 = __builtin_shufflevector(__s0_697, __s0_697, 1, 0); \ + int64x2_t __rev1_697; __rev1_697 = __builtin_shufflevector(__s1_697, __s1_697, 1, 0); \ + __ret_697 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_697), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_697, __p2_697)))); \ + __ret_697 = __builtin_shufflevector(__ret_697, __ret_697, 3, 2, 1, 0); \ + __ret_697; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s16(__p0_677, __p1_677, __p2_677) __extension__ ({ \ - int8x16_t __ret_677; \ - int8x8_t __s0_677 = __p0_677; \ - int16x8_t __s1_677 = __p1_677; \ - __ret_677 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_677), (int8x8_t)(vqrshrn_n_s16(__s1_677, __p2_677)))); \ - __ret_677; \ +#define vqrshrn_high_n_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \ + int8x16_t __ret_698; \ + int8x8_t __s0_698 = __p0_698; \ + int16x8_t __s1_698 = __p1_698; \ + __ret_698 = 
(int8x16_t)(vcombine_s8((int8x8_t)(__s0_698), (int8x8_t)(vqrshrn_n_s16(__s1_698, __p2_698)))); \ + __ret_698; \ }) #else -#define vqrshrn_high_n_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ - int8x16_t __ret_678; \ - int8x8_t __s0_678 = __p0_678; \ - int16x8_t __s1_678 = __p1_678; \ - int8x8_t __rev0_678; __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_678; __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_678 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_678), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_678, __p2_678)))); \ - __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_678; \ +#define vqrshrn_high_n_s16(__p0_699, __p1_699, __p2_699) __extension__ ({ \ + int8x16_t __ret_699; \ + int8x8_t __s0_699 = __p0_699; \ + int16x8_t __s1_699 = __p1_699; \ + int8x8_t __rev0_699; __rev0_699 = __builtin_shufflevector(__s0_699, __s0_699, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_699; __rev1_699 = __builtin_shufflevector(__s1_699, __s1_699, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_699 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_699), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_699, __p2_699)))); \ + __ret_699 = __builtin_shufflevector(__ret_699, __ret_699, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_699; \ }) #endif @@ -54267,122 +56013,122 @@ __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s32(__p0_679, __p1_679, __p2_679) __extension__ ({ \ - int16x8_t __ret_679; \ - int16x4_t __s0_679 = __p0_679; \ - int32x4_t __s1_679 = __p1_679; \ - __ret_679 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_679), (int16x4_t)(vqrshrun_n_s32(__s1_679, __p2_679)))); \ - __ret_679; \ +#define vqrshrun_high_n_s32(__p0_700, __p1_700, __p2_700) __extension__ ({ \ + int16x8_t __ret_700; \ + int16x4_t __s0_700 = __p0_700; \ + int32x4_t __s1_700 = __p1_700; \ + __ret_700 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_700), (int16x4_t)(vqrshrun_n_s32(__s1_700, __p2_700)))); \ + __ret_700; \ }) #else -#define vqrshrun_high_n_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ - int16x8_t __ret_680; \ - int16x4_t __s0_680 = __p0_680; \ - int32x4_t __s1_680 = __p1_680; \ - int16x4_t __rev0_680; __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 3, 2, 1, 0); \ - int32x4_t __rev1_680; __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 3, 2, 1, 0); \ - __ret_680 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_680), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_680, __p2_680)))); \ - __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_680; \ +#define vqrshrun_high_n_s32(__p0_701, __p1_701, __p2_701) __extension__ ({ \ + int16x8_t __ret_701; \ + int16x4_t __s0_701 = __p0_701; \ + int32x4_t __s1_701 = __p1_701; \ + int16x4_t __rev0_701; __rev0_701 = __builtin_shufflevector(__s0_701, __s0_701, 3, 2, 1, 0); \ + int32x4_t __rev1_701; __rev1_701 = __builtin_shufflevector(__s1_701, __s1_701, 3, 2, 1, 0); \ + __ret_701 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_701), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_701, __p2_701)))); \ + __ret_701 = __builtin_shufflevector(__ret_701, __ret_701, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_701; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s64(__p0_681, __p1_681, __p2_681) __extension__ ({ \ - int32x4_t __ret_681; \ - int32x2_t __s0_681 = __p0_681; \ - 
int64x2_t __s1_681 = __p1_681; \ - __ret_681 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_681), (int32x2_t)(vqrshrun_n_s64(__s1_681, __p2_681)))); \ - __ret_681; \ +#define vqrshrun_high_n_s64(__p0_702, __p1_702, __p2_702) __extension__ ({ \ + int32x4_t __ret_702; \ + int32x2_t __s0_702 = __p0_702; \ + int64x2_t __s1_702 = __p1_702; \ + __ret_702 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_702), (int32x2_t)(vqrshrun_n_s64(__s1_702, __p2_702)))); \ + __ret_702; \ }) #else -#define vqrshrun_high_n_s64(__p0_682, __p1_682, __p2_682) __extension__ ({ \ - int32x4_t __ret_682; \ - int32x2_t __s0_682 = __p0_682; \ - int64x2_t __s1_682 = __p1_682; \ - int32x2_t __rev0_682; __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 1, 0); \ - int64x2_t __rev1_682; __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \ - __ret_682 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_682), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_682, __p2_682)))); \ - __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \ - __ret_682; \ +#define vqrshrun_high_n_s64(__p0_703, __p1_703, __p2_703) __extension__ ({ \ + int32x4_t __ret_703; \ + int32x2_t __s0_703 = __p0_703; \ + int64x2_t __s1_703 = __p1_703; \ + int32x2_t __rev0_703; __rev0_703 = __builtin_shufflevector(__s0_703, __s0_703, 1, 0); \ + int64x2_t __rev1_703; __rev1_703 = __builtin_shufflevector(__s1_703, __s1_703, 1, 0); \ + __ret_703 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_703), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_703, __p2_703)))); \ + __ret_703 = __builtin_shufflevector(__ret_703, __ret_703, 3, 2, 1, 0); \ + __ret_703; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ - int8x16_t __ret_683; \ - int8x8_t __s0_683 = __p0_683; \ - int16x8_t __s1_683 = __p1_683; \ - __ret_683 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_683), (int8x8_t)(vqrshrun_n_s16(__s1_683, __p2_683)))); \ - __ret_683; \ +#define vqrshrun_high_n_s16(__p0_704, __p1_704, __p2_704) __extension__ ({ \ + int8x16_t __ret_704; \ + int8x8_t __s0_704 = __p0_704; \ + int16x8_t __s1_704 = __p1_704; \ + __ret_704 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_704), (int8x8_t)(vqrshrun_n_s16(__s1_704, __p2_704)))); \ + __ret_704; \ }) #else -#define vqrshrun_high_n_s16(__p0_684, __p1_684, __p2_684) __extension__ ({ \ - int8x16_t __ret_684; \ - int8x8_t __s0_684 = __p0_684; \ - int16x8_t __s1_684 = __p1_684; \ - int8x8_t __rev0_684; __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_684; __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_684 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_684), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_684, __p2_684)))); \ - __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_684; \ +#define vqrshrun_high_n_s16(__p0_705, __p1_705, __p2_705) __extension__ ({ \ + int8x16_t __ret_705; \ + int8x8_t __s0_705 = __p0_705; \ + int16x8_t __s1_705 = __p1_705; \ + int8x8_t __rev0_705; __rev0_705 = __builtin_shufflevector(__s0_705, __s0_705, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_705; __rev1_705 = __builtin_shufflevector(__s1_705, __s1_705, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_705 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_705), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_705, __p2_705)))); \ + __ret_705 = __builtin_shufflevector(__ret_705, __ret_705, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __ret_705; \ }) #endif #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ + uint16_t __ret; \ int32_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret = (uint16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ __ret; \ }) #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ + uint32_t __ret; \ int64_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret = (uint32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ __ret; \ }) #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ + uint8_t __ret; \ int16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret = (uint8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ __ret; \ }) -__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); return __ret; } -__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); return __ret; } -__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); return __ret; } -__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); return __ret; } -__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); return __ret; } -__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqshls_s32(int32_t __p0, int32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); return __ret; } -__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vqshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); return __ret; } -__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); return __ret; @@ -54460,128 +56206,128 @@ __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ - uint16x8_t __ret_685; \ - uint16x4_t __s0_685 = __p0_685; \ - uint32x4_t __s1_685 = __p1_685; \ - __ret_685 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_685), (uint16x4_t)(vqshrn_n_u32(__s1_685, __p2_685)))); \ - __ret_685; \ +#define vqshrn_high_n_u32(__p0_706, __p1_706, __p2_706) __extension__ ({ \ + uint16x8_t __ret_706; \ + uint16x4_t __s0_706 = __p0_706; \ + uint32x4_t __s1_706 = __p1_706; \ + __ret_706 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_706), (uint16x4_t)(vqshrn_n_u32(__s1_706, __p2_706)))); \ + __ret_706; \ }) #else -#define vqshrn_high_n_u32(__p0_686, __p1_686, __p2_686) __extension__ ({ \ - uint16x8_t __ret_686; \ - uint16x4_t __s0_686 = __p0_686; \ - uint32x4_t 
__s1_686 = __p1_686; \ - uint16x4_t __rev0_686; __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 3, 2, 1, 0); \ - uint32x4_t __rev1_686; __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 3, 2, 1, 0); \ - __ret_686 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_686), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_686, __p2_686)))); \ - __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_686; \ +#define vqshrn_high_n_u32(__p0_707, __p1_707, __p2_707) __extension__ ({ \ + uint16x8_t __ret_707; \ + uint16x4_t __s0_707 = __p0_707; \ + uint32x4_t __s1_707 = __p1_707; \ + uint16x4_t __rev0_707; __rev0_707 = __builtin_shufflevector(__s0_707, __s0_707, 3, 2, 1, 0); \ + uint32x4_t __rev1_707; __rev1_707 = __builtin_shufflevector(__s1_707, __s1_707, 3, 2, 1, 0); \ + __ret_707 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_707), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_707, __p2_707)))); \ + __ret_707 = __builtin_shufflevector(__ret_707, __ret_707, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_707; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u64(__p0_687, __p1_687, __p2_687) __extension__ ({ \ - uint32x4_t __ret_687; \ - uint32x2_t __s0_687 = __p0_687; \ - uint64x2_t __s1_687 = __p1_687; \ - __ret_687 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_687), (uint32x2_t)(vqshrn_n_u64(__s1_687, __p2_687)))); \ - __ret_687; \ +#define vqshrn_high_n_u64(__p0_708, __p1_708, __p2_708) __extension__ ({ \ + uint32x4_t __ret_708; \ + uint32x2_t __s0_708 = __p0_708; \ + uint64x2_t __s1_708 = __p1_708; \ + __ret_708 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_708), (uint32x2_t)(vqshrn_n_u64(__s1_708, __p2_708)))); \ + __ret_708; \ }) #else -#define vqshrn_high_n_u64(__p0_688, __p1_688, __p2_688) __extension__ ({ \ - uint32x4_t __ret_688; \ - uint32x2_t __s0_688 = __p0_688; \ - uint64x2_t __s1_688 = __p1_688; \ - uint32x2_t __rev0_688; __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 1, 0); \ - uint64x2_t __rev1_688; __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 1, 0); \ - __ret_688 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_688), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_688, __p2_688)))); \ - __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 3, 2, 1, 0); \ - __ret_688; \ +#define vqshrn_high_n_u64(__p0_709, __p1_709, __p2_709) __extension__ ({ \ + uint32x4_t __ret_709; \ + uint32x2_t __s0_709 = __p0_709; \ + uint64x2_t __s1_709 = __p1_709; \ + uint32x2_t __rev0_709; __rev0_709 = __builtin_shufflevector(__s0_709, __s0_709, 1, 0); \ + uint64x2_t __rev1_709; __rev1_709 = __builtin_shufflevector(__s1_709, __s1_709, 1, 0); \ + __ret_709 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_709), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_709, __p2_709)))); \ + __ret_709 = __builtin_shufflevector(__ret_709, __ret_709, 3, 2, 1, 0); \ + __ret_709; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u16(__p0_689, __p1_689, __p2_689) __extension__ ({ \ - uint8x16_t __ret_689; \ - uint8x8_t __s0_689 = __p0_689; \ - uint16x8_t __s1_689 = __p1_689; \ - __ret_689 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_689), (uint8x8_t)(vqshrn_n_u16(__s1_689, __p2_689)))); \ - __ret_689; \ +#define vqshrn_high_n_u16(__p0_710, __p1_710, __p2_710) __extension__ ({ \ + uint8x16_t __ret_710; \ + uint8x8_t __s0_710 = __p0_710; \ + uint16x8_t __s1_710 = __p1_710; \ + __ret_710 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_710), (uint8x8_t)(vqshrn_n_u16(__s1_710, __p2_710)))); \ + __ret_710; \ }) #else -#define 
vqshrn_high_n_u16(__p0_690, __p1_690, __p2_690) __extension__ ({ \ - uint8x16_t __ret_690; \ - uint8x8_t __s0_690 = __p0_690; \ - uint16x8_t __s1_690 = __p1_690; \ - uint8x8_t __rev0_690; __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_690; __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_690 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_690), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_690, __p2_690)))); \ - __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_690; \ +#define vqshrn_high_n_u16(__p0_711, __p1_711, __p2_711) __extension__ ({ \ + uint8x16_t __ret_711; \ + uint8x8_t __s0_711 = __p0_711; \ + uint16x8_t __s1_711 = __p1_711; \ + uint8x8_t __rev0_711; __rev0_711 = __builtin_shufflevector(__s0_711, __s0_711, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_711; __rev1_711 = __builtin_shufflevector(__s1_711, __s1_711, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_711 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_711), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_711, __p2_711)))); \ + __ret_711 = __builtin_shufflevector(__ret_711, __ret_711, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_711; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s32(__p0_691, __p1_691, __p2_691) __extension__ ({ \ - int16x8_t __ret_691; \ - int16x4_t __s0_691 = __p0_691; \ - int32x4_t __s1_691 = __p1_691; \ - __ret_691 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_691), (int16x4_t)(vqshrn_n_s32(__s1_691, __p2_691)))); \ - __ret_691; \ +#define vqshrn_high_n_s32(__p0_712, __p1_712, __p2_712) __extension__ ({ \ + int16x8_t __ret_712; \ + int16x4_t __s0_712 = __p0_712; \ + int32x4_t __s1_712 = __p1_712; \ + __ret_712 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_712), (int16x4_t)(vqshrn_n_s32(__s1_712, __p2_712)))); \ + __ret_712; \ }) #else -#define vqshrn_high_n_s32(__p0_692, __p1_692, __p2_692) __extension__ ({ \ - int16x8_t __ret_692; \ - int16x4_t __s0_692 = __p0_692; \ - int32x4_t __s1_692 = __p1_692; \ - int16x4_t __rev0_692; __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \ - int32x4_t __rev1_692; __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \ - __ret_692 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_692), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_692, __p2_692)))); \ - __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_692; \ +#define vqshrn_high_n_s32(__p0_713, __p1_713, __p2_713) __extension__ ({ \ + int16x8_t __ret_713; \ + int16x4_t __s0_713 = __p0_713; \ + int32x4_t __s1_713 = __p1_713; \ + int16x4_t __rev0_713; __rev0_713 = __builtin_shufflevector(__s0_713, __s0_713, 3, 2, 1, 0); \ + int32x4_t __rev1_713; __rev1_713 = __builtin_shufflevector(__s1_713, __s1_713, 3, 2, 1, 0); \ + __ret_713 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_713), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_713, __p2_713)))); \ + __ret_713 = __builtin_shufflevector(__ret_713, __ret_713, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_713; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s64(__p0_693, __p1_693, __p2_693) __extension__ ({ \ - int32x4_t __ret_693; \ - int32x2_t __s0_693 = __p0_693; \ - int64x2_t __s1_693 = __p1_693; \ - __ret_693 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_693), (int32x2_t)(vqshrn_n_s64(__s1_693, __p2_693)))); \ - __ret_693; \ +#define vqshrn_high_n_s64(__p0_714, __p1_714, __p2_714) __extension__ ({ 
\ + int32x4_t __ret_714; \ + int32x2_t __s0_714 = __p0_714; \ + int64x2_t __s1_714 = __p1_714; \ + __ret_714 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_714), (int32x2_t)(vqshrn_n_s64(__s1_714, __p2_714)))); \ + __ret_714; \ }) #else -#define vqshrn_high_n_s64(__p0_694, __p1_694, __p2_694) __extension__ ({ \ - int32x4_t __ret_694; \ - int32x2_t __s0_694 = __p0_694; \ - int64x2_t __s1_694 = __p1_694; \ - int32x2_t __rev0_694; __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \ - int64x2_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \ - __ret_694 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_694), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_694, __p2_694)))); \ - __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 3, 2, 1, 0); \ - __ret_694; \ +#define vqshrn_high_n_s64(__p0_715, __p1_715, __p2_715) __extension__ ({ \ + int32x4_t __ret_715; \ + int32x2_t __s0_715 = __p0_715; \ + int64x2_t __s1_715 = __p1_715; \ + int32x2_t __rev0_715; __rev0_715 = __builtin_shufflevector(__s0_715, __s0_715, 1, 0); \ + int64x2_t __rev1_715; __rev1_715 = __builtin_shufflevector(__s1_715, __s1_715, 1, 0); \ + __ret_715 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_715), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_715, __p2_715)))); \ + __ret_715 = __builtin_shufflevector(__ret_715, __ret_715, 3, 2, 1, 0); \ + __ret_715; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s16(__p0_695, __p1_695, __p2_695) __extension__ ({ \ - int8x16_t __ret_695; \ - int8x8_t __s0_695 = __p0_695; \ - int16x8_t __s1_695 = __p1_695; \ - __ret_695 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_695), (int8x8_t)(vqshrn_n_s16(__s1_695, __p2_695)))); \ - __ret_695; \ +#define vqshrn_high_n_s16(__p0_716, __p1_716, __p2_716) __extension__ ({ \ + int8x16_t __ret_716; \ + int8x8_t __s0_716 = __p0_716; \ + int16x8_t __s1_716 = __p1_716; \ + __ret_716 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_716), (int8x8_t)(vqshrn_n_s16(__s1_716, __p2_716)))); \ + __ret_716; \ }) #else -#define vqshrn_high_n_s16(__p0_696, __p1_696, __p2_696) __extension__ ({ \ - int8x16_t __ret_696; \ - int8x8_t __s0_696 = __p0_696; \ - int16x8_t __s1_696 = __p1_696; \ - int8x8_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_696; __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_696 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_696), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_696, __p2_696)))); \ - __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_696; \ +#define vqshrn_high_n_s16(__p0_717, __p1_717, __p2_717) __extension__ ({ \ + int8x16_t __ret_717; \ + int8x8_t __s0_717 = __p0_717; \ + int16x8_t __s1_717 = __p1_717; \ + int8x8_t __rev0_717; __rev0_717 = __builtin_shufflevector(__s0_717, __s0_717, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_717; __rev1_717 = __builtin_shufflevector(__s1_717, __s1_717, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_717 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_717), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_717, __p2_717)))); \ + __ret_717 = __builtin_shufflevector(__ret_717, __ret_717, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_717; \ }) #endif @@ -54622,134 +56368,134 @@ __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s32(__p0_697, __p1_697, __p2_697) __extension__ ({ \ - int16x8_t __ret_697; \ - 
int16x4_t __s0_697 = __p0_697; \ - int32x4_t __s1_697 = __p1_697; \ - __ret_697 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_697), (int16x4_t)(vqshrun_n_s32(__s1_697, __p2_697)))); \ - __ret_697; \ +#define vqshrun_high_n_s32(__p0_718, __p1_718, __p2_718) __extension__ ({ \ + int16x8_t __ret_718; \ + int16x4_t __s0_718 = __p0_718; \ + int32x4_t __s1_718 = __p1_718; \ + __ret_718 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_718), (int16x4_t)(vqshrun_n_s32(__s1_718, __p2_718)))); \ + __ret_718; \ }) #else -#define vqshrun_high_n_s32(__p0_698, __p1_698, __p2_698) __extension__ ({ \ - int16x8_t __ret_698; \ - int16x4_t __s0_698 = __p0_698; \ - int32x4_t __s1_698 = __p1_698; \ - int16x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ - int32x4_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 3, 2, 1, 0); \ - __ret_698 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_698), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_698, __p2_698)))); \ - __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_698; \ +#define vqshrun_high_n_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \ + int16x8_t __ret_719; \ + int16x4_t __s0_719 = __p0_719; \ + int32x4_t __s1_719 = __p1_719; \ + int16x4_t __rev0_719; __rev0_719 = __builtin_shufflevector(__s0_719, __s0_719, 3, 2, 1, 0); \ + int32x4_t __rev1_719; __rev1_719 = __builtin_shufflevector(__s1_719, __s1_719, 3, 2, 1, 0); \ + __ret_719 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_719), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_719, __p2_719)))); \ + __ret_719 = __builtin_shufflevector(__ret_719, __ret_719, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_719; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s64(__p0_699, __p1_699, __p2_699) __extension__ ({ \ - int32x4_t __ret_699; \ - int32x2_t __s0_699 = __p0_699; \ - int64x2_t __s1_699 = __p1_699; \ - __ret_699 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_699), (int32x2_t)(vqshrun_n_s64(__s1_699, __p2_699)))); \ - __ret_699; \ +#define vqshrun_high_n_s64(__p0_720, __p1_720, __p2_720) __extension__ ({ \ + int32x4_t __ret_720; \ + int32x2_t __s0_720 = __p0_720; \ + int64x2_t __s1_720 = __p1_720; \ + __ret_720 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_720), (int32x2_t)(vqshrun_n_s64(__s1_720, __p2_720)))); \ + __ret_720; \ }) #else -#define vqshrun_high_n_s64(__p0_700, __p1_700, __p2_700) __extension__ ({ \ - int32x4_t __ret_700; \ - int32x2_t __s0_700 = __p0_700; \ - int64x2_t __s1_700 = __p1_700; \ - int32x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ - int64x2_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \ - __ret_700 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_700), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_700, __p2_700)))); \ - __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 3, 2, 1, 0); \ - __ret_700; \ +#define vqshrun_high_n_s64(__p0_721, __p1_721, __p2_721) __extension__ ({ \ + int32x4_t __ret_721; \ + int32x2_t __s0_721 = __p0_721; \ + int64x2_t __s1_721 = __p1_721; \ + int32x2_t __rev0_721; __rev0_721 = __builtin_shufflevector(__s0_721, __s0_721, 1, 0); \ + int64x2_t __rev1_721; __rev1_721 = __builtin_shufflevector(__s1_721, __s1_721, 1, 0); \ + __ret_721 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_721), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_721, __p2_721)))); \ + __ret_721 = __builtin_shufflevector(__ret_721, __ret_721, 3, 2, 1, 0); \ + __ret_721; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vqshrun_high_n_s16(__p0_701, __p1_701, __p2_701) __extension__ ({ \
-  int8x16_t __ret_701; \
-  int8x8_t __s0_701 = __p0_701; \
-  int16x8_t __s1_701 = __p1_701; \
-  __ret_701 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_701), (int8x8_t)(vqshrun_n_s16(__s1_701, __p2_701)))); \
-  __ret_701; \
+#define vqshrun_high_n_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \
+  int8x16_t __ret_722; \
+  int8x8_t __s0_722 = __p0_722; \
+  int16x8_t __s1_722 = __p1_722; \
+  __ret_722 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_722), (int8x8_t)(vqshrun_n_s16(__s1_722, __p2_722)))); \
+  __ret_722; \
 })
 #else
-#define vqshrun_high_n_s16(__p0_702, __p1_702, __p2_702) __extension__ ({ \
-  int8x16_t __ret_702; \
-  int8x8_t __s0_702 = __p0_702; \
-  int16x8_t __s1_702 = __p1_702; \
-  int8x8_t __rev0_702; __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_702 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_702), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_702, __p2_702)))); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_702; \
+#define vqshrun_high_n_s16(__p0_723, __p1_723, __p2_723) __extension__ ({ \
+  int8x16_t __ret_723; \
+  int8x8_t __s0_723 = __p0_723; \
+  int16x8_t __s1_723 = __p1_723; \
+  int8x8_t __rev0_723; __rev0_723 = __builtin_shufflevector(__s0_723, __s0_723, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_723; __rev1_723 = __builtin_shufflevector(__s1_723, __s1_723, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_723 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_723), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_723, __p2_723)))); \
+  __ret_723 = __builtin_shufflevector(__ret_723, __ret_723, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_723; \
 })
 #endif
 #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int16_t __ret; \
+  uint16_t __ret; \
   int32_t __s0 = __p0; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
   __ret; \
 })
 #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int32_t __ret; \
+  uint32_t __ret; \
   int64_t __s0 = __p0; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
   __ret; \
 })
 #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int8_t __ret; \
+  uint8_t __ret; \
   int16_t __s0 = __p0; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
+  __ret = (uint8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
   __ret; \
 })
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
+__ai __attribute__((target("neon"))) uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
   uint8_t __ret;
   __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
   return __ret;
 }
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
+__ai __attribute__((target("neon"))) uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
   uint32_t __ret;
   __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
   return __ret;
 }
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
+__ai __attribute__((target("neon"))) uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
   return __ret;
 }
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
+__ai __attribute__((target("neon"))) uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
   uint16_t __ret;
   __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
   return __ret;
 }
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
+__ai __attribute__((target("neon"))) int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
   int8_t __ret;
   __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
   return __ret;
 }
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
+__ai __attribute__((target("neon"))) int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
   int32_t __ret;
   __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
   return __ret;
 }
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
+__ai __attribute__((target("neon"))) int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
   int64_t __ret;
   __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
   return __ret;
 }
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
+__ai __attribute__((target("neon"))) int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
   int16_t __ret;
   __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
   poly8x8_t __ret;
   __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
   return __ret;
 }
 #else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
+__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
   poly8x8_t __ret;
   poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -54760,13 +56506,13 @@ __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
   poly8x16_t __ret;
   __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
   return __ret;
 }
 #else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
   poly8x16_t __ret;
   poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -54777,13 +56523,13 @@ __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
   return __ret;
 }
 #else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
+__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -54794,13 +56540,13 @@ __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
 #endif
 #ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t
__p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54811,13 +56557,13 @@ __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54828,13 +56574,13 @@ __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54845,13 +56591,13 @@ __ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54864,13 +56610,13 @@ __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54883,13 +56629,13 @@ __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54902,13 +56648,13 @@ __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54921,13 +56667,13 @@ __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54940,13 +56686,13 @@ __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16x2_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54959,13 +56705,13 @@ __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) 
{ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54979,13 +56725,13 @@ __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -54999,13 +56745,13 @@ __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55019,13 +56765,13 @@ __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55039,13 +56785,13 @@ __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55059,13 +56805,13 @@ __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, 
uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16x3_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55079,13 +56825,13 @@ __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { poly8x8_t __ret; poly8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55100,13 +56846,13 @@ __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); return __ret; } #else -__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55121,13 +56867,13 @@ __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55142,13 +56888,13 @@ __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t 
__p1) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55163,13 +56909,13 @@ __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55184,13 +56930,13 @@ __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x16x4_t __rev0; __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55205,13 +56951,13 @@ __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55223,13 +56969,13 @@ __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); return __ret; } #else -__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55241,13 +56987,13 @@ __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55259,13 +57005,13 @@ __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55277,13 +57023,13 @@ __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55295,13 +57041,13 @@ __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55313,13 +57059,13 @@ __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x2_t __rev1; @@ -55333,13 +57079,13 @@ __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); return __ret; } #else -__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x2_t __rev1; @@ -55353,13 +57099,13 @@ __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); return __ret; } #else -__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x2_t __rev1; @@ -55373,13 +57119,13 @@ __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); return __ret; } #else -__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x2_t __rev1; @@ -55393,13 +57139,13 @@ __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { #endif #ifdef 
__LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x2_t __rev1; @@ -55413,13 +57159,13 @@ __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x2_t __rev1; @@ -55433,13 +57179,13 @@ __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x3_t __rev1; @@ -55454,13 +57200,13 @@ __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); return __ret; } #else -__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x3_t __rev1; @@ -55475,13 +57221,13 @@ __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { uint8x16_t __ret; 
__ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); return __ret; } #else -__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x3_t __rev1; @@ -55496,13 +57242,13 @@ __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); return __ret; } #else -__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x3_t __rev1; @@ -55517,13 +57263,13 @@ __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x3_t __rev1; @@ -55538,13 +57284,13 @@ __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x3_t __rev1; @@ -55559,13 +57305,13 @@ __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], 
(int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x4_t __rev1; @@ -55581,13 +57327,13 @@ __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); return __ret; } #else -__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16x4_t __rev1; @@ -55603,13 +57349,13 @@ __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); return __ret; } #else -__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x4_t __rev1; @@ -55625,13 +57371,13 @@ __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); return __ret; } #else -__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x4_t __rev1; @@ -55647,13 +57393,13 @@ __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], 
(int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16x4_t __rev1; @@ -55669,13 +57415,13 @@ __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); return __ret; } #else -__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16x4_t __rev1; @@ -55691,13 +57437,13 @@ __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); return __ret; } #else -__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -55709,13 +57455,13 @@ __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); return __ret; } #else -__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -55727,13 +57473,13 @@ __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55745,13 +57491,13 @@ __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); return __ret; } #else -__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -55763,13 +57509,13 @@ __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); return __ret; } #else -__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -55781,13 +57527,13 @@ __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); return __ret; } #else -__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -55799,13 +57545,13 @@ __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); return __ret; } #else -__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); @@ -55815,13 +57561,13 @@ __ai poly8x8_t vrbit_p8(poly8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); return __ret; } #else -__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { +__ai 
__attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); @@ -55831,13 +57577,13 @@ __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); @@ -55847,13 +57593,13 @@ __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrbitq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vrbitq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); @@ -55863,13 +57609,13 @@ __ai int8x16_t vrbitq_s8(int8x16_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); @@ -55879,13 +57625,13 @@ __ai uint8x8_t vrbit_u8(uint8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrbit_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vrbit_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); @@ -55895,13 +57641,13 @@ __ai int8x8_t vrbit_s8(int8x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); return __ret; } #else -__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); @@ -55910,29 +57656,29 @@ __ai float64x2_t vrecpeq_f64(float64x2_t __p0) { } #endif -__ai float64x1_t 
vrecpe_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vrecpe_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); return __ret; } -__ai float64_t vrecped_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64_t vrecped_f64(float64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); return __ret; } -__ai float32_t vrecpes_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32_t vrecpes_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -55942,1997 +57688,1997 @@ __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } -__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); return __ret; } -__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); return __ret; } -__ai float64_t vrecpxd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64_t vrecpxd_f64(float64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); return __ret; } -__ai float32_t vrecpxs_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32_t vrecpxs_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { poly8x8_t 
__ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { poly8x8_t __ret; __ret = (poly8x8_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t 
vreinterpret_p64_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { poly64x1_t __ret; __ret = (poly64x1_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { poly16x4_t __ret; __ret = (poly16x4_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p128(poly128_t 
__p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { poly8x16_t __ret; __ret = (poly8x16_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { +__ai 
__attribute__((target("neon"))) poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { poly128_t __ret; __ret = (poly128_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { +__ai 
__attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { poly64x2_t __ret; __ret = (poly64x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { 
+__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { poly16x8_t __ret; __ret = (poly16x8_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { +__ai 
__attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { uint8x16_t __ret; __ret = (uint8x16_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { +__ai 
__attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { uint64x2_t __ret; __ret = (uint64x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { 
+__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { uint16x8_t __ret; __ret = (uint16x8_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) 
int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { +__ai 
__attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return 
__ret; } -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p128(poly128_t __p0) 
{ int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; 
} -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { 
+__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { uint8x8_t __ret; 
__ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } 
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t 
vreinterpret_u16_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { int8x8_t 
__ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { float64x1_t __ret; __ret = (float64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { 
float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { float32x2_t __ret; __ret = (float32x2_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t 
vreinterpret_f16_u16(uint16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { float16x4_t __ret; __ret = (float16x4_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { int32x2_t 
__ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { int32x2_t __ret; __ret = (int32x2_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { int64x1_t __ret; __ret = (int64x1_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { 
+__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { int16x4_t __ret; __ret = (int16x4_t)(__p0); return __ret; } -__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); return __ret; } -__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vrshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); return __ret; @@ -57950,139 +59696,139 @@ __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u32(__p0_703, __p1_703, __p2_703) __extension__ ({ \ - uint16x8_t __ret_703; \ - uint16x4_t __s0_703 = __p0_703; \ - uint32x4_t __s1_703 = __p1_703; \ - __ret_703 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_703), (uint16x4_t)(vrshrn_n_u32(__s1_703, __p2_703)))); \ - __ret_703; \ +#define vrshrn_high_n_u32(__p0_724, __p1_724, __p2_724) __extension__ ({ \ + uint16x8_t __ret_724; \ + uint16x4_t __s0_724 = __p0_724; \ + uint32x4_t __s1_724 = __p1_724; \ + __ret_724 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_724), (uint16x4_t)(vrshrn_n_u32(__s1_724, __p2_724)))); \ + __ret_724; \ }) #else -#define vrshrn_high_n_u32(__p0_704, __p1_704, __p2_704) __extension__ ({ \ - uint16x8_t __ret_704; \ - uint16x4_t __s0_704 = __p0_704; \ - uint32x4_t __s1_704 = __p1_704; 
\ - uint16x4_t __rev0_704; __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 3, 2, 1, 0); \ - uint32x4_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \ - __ret_704 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_704), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_704, __p2_704)))); \ - __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_704; \ +#define vrshrn_high_n_u32(__p0_725, __p1_725, __p2_725) __extension__ ({ \ + uint16x8_t __ret_725; \ + uint16x4_t __s0_725 = __p0_725; \ + uint32x4_t __s1_725 = __p1_725; \ + uint16x4_t __rev0_725; __rev0_725 = __builtin_shufflevector(__s0_725, __s0_725, 3, 2, 1, 0); \ + uint32x4_t __rev1_725; __rev1_725 = __builtin_shufflevector(__s1_725, __s1_725, 3, 2, 1, 0); \ + __ret_725 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_725), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_725, __p2_725)))); \ + __ret_725 = __builtin_shufflevector(__ret_725, __ret_725, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_725; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u64(__p0_705, __p1_705, __p2_705) __extension__ ({ \ - uint32x4_t __ret_705; \ - uint32x2_t __s0_705 = __p0_705; \ - uint64x2_t __s1_705 = __p1_705; \ - __ret_705 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_705), (uint32x2_t)(vrshrn_n_u64(__s1_705, __p2_705)))); \ - __ret_705; \ +#define vrshrn_high_n_u64(__p0_726, __p1_726, __p2_726) __extension__ ({ \ + uint32x4_t __ret_726; \ + uint32x2_t __s0_726 = __p0_726; \ + uint64x2_t __s1_726 = __p1_726; \ + __ret_726 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_726), (uint32x2_t)(vrshrn_n_u64(__s1_726, __p2_726)))); \ + __ret_726; \ }) #else -#define vrshrn_high_n_u64(__p0_706, __p1_706, __p2_706) __extension__ ({ \ - uint32x4_t __ret_706; \ - uint32x2_t __s0_706 = __p0_706; \ - uint64x2_t __s1_706 = __p1_706; \ - uint32x2_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \ - uint64x2_t __rev1_706; __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \ - __ret_706 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_706), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_706, __p2_706)))); \ - __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \ - __ret_706; \ +#define vrshrn_high_n_u64(__p0_727, __p1_727, __p2_727) __extension__ ({ \ + uint32x4_t __ret_727; \ + uint32x2_t __s0_727 = __p0_727; \ + uint64x2_t __s1_727 = __p1_727; \ + uint32x2_t __rev0_727; __rev0_727 = __builtin_shufflevector(__s0_727, __s0_727, 1, 0); \ + uint64x2_t __rev1_727; __rev1_727 = __builtin_shufflevector(__s1_727, __s1_727, 1, 0); \ + __ret_727 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_727), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_727, __p2_727)))); \ + __ret_727 = __builtin_shufflevector(__ret_727, __ret_727, 3, 2, 1, 0); \ + __ret_727; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u16(__p0_707, __p1_707, __p2_707) __extension__ ({ \ - uint8x16_t __ret_707; \ - uint8x8_t __s0_707 = __p0_707; \ - uint16x8_t __s1_707 = __p1_707; \ - __ret_707 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_707), (uint8x8_t)(vrshrn_n_u16(__s1_707, __p2_707)))); \ - __ret_707; \ +#define vrshrn_high_n_u16(__p0_728, __p1_728, __p2_728) __extension__ ({ \ + uint8x16_t __ret_728; \ + uint8x8_t __s0_728 = __p0_728; \ + uint16x8_t __s1_728 = __p1_728; \ + __ret_728 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_728), (uint8x8_t)(vrshrn_n_u16(__s1_728, __p2_728)))); \ + __ret_728; \ }) #else -#define vrshrn_high_n_u16(__p0_708, 
__p1_708, __p2_708) __extension__ ({ \ - uint8x16_t __ret_708; \ - uint8x8_t __s0_708 = __p0_708; \ - uint16x8_t __s1_708 = __p1_708; \ - uint8x8_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_708 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_708), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_708, __p2_708)))); \ - __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_708; \ +#define vrshrn_high_n_u16(__p0_729, __p1_729, __p2_729) __extension__ ({ \ + uint8x16_t __ret_729; \ + uint8x8_t __s0_729 = __p0_729; \ + uint16x8_t __s1_729 = __p1_729; \ + uint8x8_t __rev0_729; __rev0_729 = __builtin_shufflevector(__s0_729, __s0_729, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_729; __rev1_729 = __builtin_shufflevector(__s1_729, __s1_729, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_729 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_729), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_729, __p2_729)))); \ + __ret_729 = __builtin_shufflevector(__ret_729, __ret_729, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_729; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s32(__p0_709, __p1_709, __p2_709) __extension__ ({ \ - int16x8_t __ret_709; \ - int16x4_t __s0_709 = __p0_709; \ - int32x4_t __s1_709 = __p1_709; \ - __ret_709 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_709), (int16x4_t)(vrshrn_n_s32(__s1_709, __p2_709)))); \ - __ret_709; \ +#define vrshrn_high_n_s32(__p0_730, __p1_730, __p2_730) __extension__ ({ \ + int16x8_t __ret_730; \ + int16x4_t __s0_730 = __p0_730; \ + int32x4_t __s1_730 = __p1_730; \ + __ret_730 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_730), (int16x4_t)(vrshrn_n_s32(__s1_730, __p2_730)))); \ + __ret_730; \ }) #else -#define vrshrn_high_n_s32(__p0_710, __p1_710, __p2_710) __extension__ ({ \ - int16x8_t __ret_710; \ - int16x4_t __s0_710 = __p0_710; \ - int32x4_t __s1_710 = __p1_710; \ - int16x4_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \ - int32x4_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \ - __ret_710 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_710), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_710, __p2_710)))); \ - __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_710; \ +#define vrshrn_high_n_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \ + int16x8_t __ret_731; \ + int16x4_t __s0_731 = __p0_731; \ + int32x4_t __s1_731 = __p1_731; \ + int16x4_t __rev0_731; __rev0_731 = __builtin_shufflevector(__s0_731, __s0_731, 3, 2, 1, 0); \ + int32x4_t __rev1_731; __rev1_731 = __builtin_shufflevector(__s1_731, __s1_731, 3, 2, 1, 0); \ + __ret_731 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_731), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_731, __p2_731)))); \ + __ret_731 = __builtin_shufflevector(__ret_731, __ret_731, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_731; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s64(__p0_711, __p1_711, __p2_711) __extension__ ({ \ - int32x4_t __ret_711; \ - int32x2_t __s0_711 = __p0_711; \ - int64x2_t __s1_711 = __p1_711; \ - __ret_711 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_711), (int32x2_t)(vrshrn_n_s64(__s1_711, __p2_711)))); \ - __ret_711; \ +#define vrshrn_high_n_s64(__p0_732, __p1_732, __p2_732) __extension__ ({ \ + int32x4_t __ret_732; \ 
+ int32x2_t __s0_732 = __p0_732; \ + int64x2_t __s1_732 = __p1_732; \ + __ret_732 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_732), (int32x2_t)(vrshrn_n_s64(__s1_732, __p2_732)))); \ + __ret_732; \ }) #else -#define vrshrn_high_n_s64(__p0_712, __p1_712, __p2_712) __extension__ ({ \ - int32x4_t __ret_712; \ - int32x2_t __s0_712 = __p0_712; \ - int64x2_t __s1_712 = __p1_712; \ - int32x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ - int64x2_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 1, 0); \ - __ret_712 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_712), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_712, __p2_712)))); \ - __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 3, 2, 1, 0); \ - __ret_712; \ +#define vrshrn_high_n_s64(__p0_733, __p1_733, __p2_733) __extension__ ({ \ + int32x4_t __ret_733; \ + int32x2_t __s0_733 = __p0_733; \ + int64x2_t __s1_733 = __p1_733; \ + int32x2_t __rev0_733; __rev0_733 = __builtin_shufflevector(__s0_733, __s0_733, 1, 0); \ + int64x2_t __rev1_733; __rev1_733 = __builtin_shufflevector(__s1_733, __s1_733, 1, 0); \ + __ret_733 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_733), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_733, __p2_733)))); \ + __ret_733 = __builtin_shufflevector(__ret_733, __ret_733, 3, 2, 1, 0); \ + __ret_733; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s16(__p0_713, __p1_713, __p2_713) __extension__ ({ \ - int8x16_t __ret_713; \ - int8x8_t __s0_713 = __p0_713; \ - int16x8_t __s1_713 = __p1_713; \ - __ret_713 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_713), (int8x8_t)(vrshrn_n_s16(__s1_713, __p2_713)))); \ - __ret_713; \ +#define vrshrn_high_n_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \ + int8x16_t __ret_734; \ + int8x8_t __s0_734 = __p0_734; \ + int16x8_t __s1_734 = __p1_734; \ + __ret_734 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_734), (int8x8_t)(vrshrn_n_s16(__s1_734, __p2_734)))); \ + __ret_734; \ }) #else -#define vrshrn_high_n_s16(__p0_714, __p1_714, __p2_714) __extension__ ({ \ - int8x16_t __ret_714; \ - int8x8_t __s0_714 = __p0_714; \ - int16x8_t __s1_714 = __p1_714; \ - int8x8_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_714; __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_714 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_714), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_714, __p2_714)))); \ - __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_714; \ +#define vrshrn_high_n_s16(__p0_735, __p1_735, __p2_735) __extension__ ({ \ + int8x16_t __ret_735; \ + int8x8_t __s0_735 = __p0_735; \ + int16x8_t __s1_735 = __p1_735; \ + int8x8_t __rev0_735; __rev0_735 = __builtin_shufflevector(__s0_735, __s0_735, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_735; __rev1_735 = __builtin_shufflevector(__s1_735, __s1_735, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_735 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_735), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_735, __p2_735)))); \ + __ret_735 = __builtin_shufflevector(__ret_735, __ret_735, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_735; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); 
return __ret; } #else -__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); @@ -58091,29 +59837,29 @@ __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { } #endif -__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vrsqrte_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); return __ret; } -__ai float64_t vrsqrted_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64_t vrsqrted_f64(float64_t __p0) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); return __ret; } -__ai float32_t vrsqrtes_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32_t vrsqrtes_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -58123,17 +59869,17 @@ __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } -__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { float64_t __ret; __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); return __ret; } -__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); return __ret; @@ -58153,13 +59899,13 @@ __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); return __ret; } #else -__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -58171,13 +59917,13 @@ __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __ #endif #ifdef 
__LITTLE_ENDIAN__ -__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); return __ret; } #else -__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -58189,13 +59935,13 @@ __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __ #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -58207,13 +59953,13 @@ __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); return __ret; } #else -__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -58225,13 +59971,13 @@ __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); return __ret; } #else -__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -58243,13 +59989,13 @@ __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); return __ret; } #else -__ai int8x16_t 
vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -58328,12 +60074,12 @@ __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ __ret; \ }) -__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); return __ret; } -__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vshld_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); return __ret; @@ -58351,110 +60097,110 @@ __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u8(__p0_715, __p1_715) __extension__ ({ \ - uint16x8_t __ret_715; \ - uint8x16_t __s0_715 = __p0_715; \ - __ret_715 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_715), __p1_715)); \ - __ret_715; \ +#define vshll_high_n_u8(__p0_736, __p1_736) __extension__ ({ \ + uint16x8_t __ret_736; \ + uint8x16_t __s0_736 = __p0_736; \ + __ret_736 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_736), __p1_736)); \ + __ret_736; \ }) #else -#define vshll_high_n_u8(__p0_716, __p1_716) __extension__ ({ \ - uint16x8_t __ret_716; \ - uint8x16_t __s0_716 = __p0_716; \ - uint8x16_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_716 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_716), __p1_716)); \ - __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_716; \ +#define vshll_high_n_u8(__p0_737, __p1_737) __extension__ ({ \ + uint16x8_t __ret_737; \ + uint8x16_t __s0_737 = __p0_737; \ + uint8x16_t __rev0_737; __rev0_737 = __builtin_shufflevector(__s0_737, __s0_737, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_737 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_737), __p1_737)); \ + __ret_737 = __builtin_shufflevector(__ret_737, __ret_737, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_737; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u32(__p0_717, __p1_717) __extension__ ({ \ - uint64x2_t __ret_717; \ - uint32x4_t __s0_717 = __p0_717; \ - __ret_717 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_717), __p1_717)); \ - __ret_717; \ +#define vshll_high_n_u32(__p0_738, __p1_738) __extension__ ({ \ + uint64x2_t __ret_738; \ + uint32x4_t __s0_738 = __p0_738; \ + __ret_738 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_738), __p1_738)); \ + __ret_738; \ }) #else -#define vshll_high_n_u32(__p0_718, __p1_718) __extension__ ({ \ - uint64x2_t __ret_718; \ - uint32x4_t __s0_718 = __p0_718; \ - uint32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ - __ret_718 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_718), __p1_718)); \ - __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 1, 0); \ - __ret_718; \ +#define vshll_high_n_u32(__p0_739, __p1_739) __extension__ ({ \ + uint64x2_t __ret_739; \ + uint32x4_t __s0_739 = __p0_739; \ + uint32x4_t 
__rev0_739; __rev0_739 = __builtin_shufflevector(__s0_739, __s0_739, 3, 2, 1, 0); \ + __ret_739 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_739), __p1_739)); \ + __ret_739 = __builtin_shufflevector(__ret_739, __ret_739, 1, 0); \ + __ret_739; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u16(__p0_719, __p1_719) __extension__ ({ \ - uint32x4_t __ret_719; \ - uint16x8_t __s0_719 = __p0_719; \ - __ret_719 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_719), __p1_719)); \ - __ret_719; \ +#define vshll_high_n_u16(__p0_740, __p1_740) __extension__ ({ \ + uint32x4_t __ret_740; \ + uint16x8_t __s0_740 = __p0_740; \ + __ret_740 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_740), __p1_740)); \ + __ret_740; \ }) #else -#define vshll_high_n_u16(__p0_720, __p1_720) __extension__ ({ \ - uint32x4_t __ret_720; \ - uint16x8_t __s0_720 = __p0_720; \ - uint16x8_t __rev0_720; __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_720 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_720), __p1_720)); \ - __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 3, 2, 1, 0); \ - __ret_720; \ +#define vshll_high_n_u16(__p0_741, __p1_741) __extension__ ({ \ + uint32x4_t __ret_741; \ + uint16x8_t __s0_741 = __p0_741; \ + uint16x8_t __rev0_741; __rev0_741 = __builtin_shufflevector(__s0_741, __s0_741, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_741 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_741), __p1_741)); \ + __ret_741 = __builtin_shufflevector(__ret_741, __ret_741, 3, 2, 1, 0); \ + __ret_741; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s8(__p0_721, __p1_721) __extension__ ({ \ - int16x8_t __ret_721; \ - int8x16_t __s0_721 = __p0_721; \ - __ret_721 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_721), __p1_721)); \ - __ret_721; \ +#define vshll_high_n_s8(__p0_742, __p1_742) __extension__ ({ \ + int16x8_t __ret_742; \ + int8x16_t __s0_742 = __p0_742; \ + __ret_742 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_742), __p1_742)); \ + __ret_742; \ }) #else -#define vshll_high_n_s8(__p0_722, __p1_722) __extension__ ({ \ - int16x8_t __ret_722; \ - int8x16_t __s0_722 = __p0_722; \ - int8x16_t __rev0_722; __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_722 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_722), __p1_722)); \ - __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_722; \ +#define vshll_high_n_s8(__p0_743, __p1_743) __extension__ ({ \ + int16x8_t __ret_743; \ + int8x16_t __s0_743 = __p0_743; \ + int8x16_t __rev0_743; __rev0_743 = __builtin_shufflevector(__s0_743, __s0_743, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_743 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_743), __p1_743)); \ + __ret_743 = __builtin_shufflevector(__ret_743, __ret_743, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_743; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s32(__p0_723, __p1_723) __extension__ ({ \ - int64x2_t __ret_723; \ - int32x4_t __s0_723 = __p0_723; \ - __ret_723 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_723), __p1_723)); \ - __ret_723; \ +#define vshll_high_n_s32(__p0_744, __p1_744) __extension__ ({ \ + int64x2_t __ret_744; \ + int32x4_t __s0_744 = __p0_744; \ + __ret_744 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_744), __p1_744)); \ + __ret_744; \ }) #else -#define vshll_high_n_s32(__p0_724, __p1_724) __extension__ ({ \ - int64x2_t __ret_724; 
\ - int32x4_t __s0_724 = __p0_724; \ - int32x4_t __rev0_724; __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \ - __ret_724 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_724), __p1_724)); \ - __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \ - __ret_724; \ +#define vshll_high_n_s32(__p0_745, __p1_745) __extension__ ({ \ + int64x2_t __ret_745; \ + int32x4_t __s0_745 = __p0_745; \ + int32x4_t __rev0_745; __rev0_745 = __builtin_shufflevector(__s0_745, __s0_745, 3, 2, 1, 0); \ + __ret_745 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_745), __p1_745)); \ + __ret_745 = __builtin_shufflevector(__ret_745, __ret_745, 1, 0); \ + __ret_745; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s16(__p0_725, __p1_725) __extension__ ({ \ - int32x4_t __ret_725; \ - int16x8_t __s0_725 = __p0_725; \ - __ret_725 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_725), __p1_725)); \ - __ret_725; \ +#define vshll_high_n_s16(__p0_746, __p1_746) __extension__ ({ \ + int32x4_t __ret_746; \ + int16x8_t __s0_746 = __p0_746; \ + __ret_746 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_746), __p1_746)); \ + __ret_746; \ }) #else -#define vshll_high_n_s16(__p0_726, __p1_726) __extension__ ({ \ - int32x4_t __ret_726; \ - int16x8_t __s0_726 = __p0_726; \ - int16x8_t __rev0_726; __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_726 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_726), __p1_726)); \ - __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \ - __ret_726; \ +#define vshll_high_n_s16(__p0_747, __p1_747) __extension__ ({ \ + int32x4_t __ret_747; \ + int16x8_t __s0_747 = __p0_747; \ + int16x8_t __rev0_747; __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_747 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_747), __p1_747)); \ + __ret_747 = __builtin_shufflevector(__ret_747, __ret_747, 3, 2, 1, 0); \ + __ret_747; \ }) #endif @@ -58471,128 +60217,128 @@ __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u32(__p0_727, __p1_727, __p2_727) __extension__ ({ \ - uint16x8_t __ret_727; \ - uint16x4_t __s0_727 = __p0_727; \ - uint32x4_t __s1_727 = __p1_727; \ - __ret_727 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_727), (uint16x4_t)(vshrn_n_u32(__s1_727, __p2_727)))); \ - __ret_727; \ +#define vshrn_high_n_u32(__p0_748, __p1_748, __p2_748) __extension__ ({ \ + uint16x8_t __ret_748; \ + uint16x4_t __s0_748 = __p0_748; \ + uint32x4_t __s1_748 = __p1_748; \ + __ret_748 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_748), (uint16x4_t)(vshrn_n_u32(__s1_748, __p2_748)))); \ + __ret_748; \ }) #else -#define vshrn_high_n_u32(__p0_728, __p1_728, __p2_728) __extension__ ({ \ - uint16x8_t __ret_728; \ - uint16x4_t __s0_728 = __p0_728; \ - uint32x4_t __s1_728 = __p1_728; \ - uint16x4_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \ - uint32x4_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \ - __ret_728 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_728), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_728, __p2_728)))); \ - __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_728; \ +#define vshrn_high_n_u32(__p0_749, __p1_749, __p2_749) __extension__ ({ \ + uint16x8_t __ret_749; \ + uint16x4_t __s0_749 = __p0_749; \ + uint32x4_t __s1_749 = __p1_749; 
\ + uint16x4_t __rev0_749; __rev0_749 = __builtin_shufflevector(__s0_749, __s0_749, 3, 2, 1, 0); \ + uint32x4_t __rev1_749; __rev1_749 = __builtin_shufflevector(__s1_749, __s1_749, 3, 2, 1, 0); \ + __ret_749 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_749), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_749, __p2_749)))); \ + __ret_749 = __builtin_shufflevector(__ret_749, __ret_749, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_749; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u64(__p0_729, __p1_729, __p2_729) __extension__ ({ \ - uint32x4_t __ret_729; \ - uint32x2_t __s0_729 = __p0_729; \ - uint64x2_t __s1_729 = __p1_729; \ - __ret_729 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_729), (uint32x2_t)(vshrn_n_u64(__s1_729, __p2_729)))); \ - __ret_729; \ +#define vshrn_high_n_u64(__p0_750, __p1_750, __p2_750) __extension__ ({ \ + uint32x4_t __ret_750; \ + uint32x2_t __s0_750 = __p0_750; \ + uint64x2_t __s1_750 = __p1_750; \ + __ret_750 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_750), (uint32x2_t)(vshrn_n_u64(__s1_750, __p2_750)))); \ + __ret_750; \ }) #else -#define vshrn_high_n_u64(__p0_730, __p1_730, __p2_730) __extension__ ({ \ - uint32x4_t __ret_730; \ - uint32x2_t __s0_730 = __p0_730; \ - uint64x2_t __s1_730 = __p1_730; \ - uint32x2_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 1, 0); \ - uint64x2_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 1, 0); \ - __ret_730 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_730), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_730, __p2_730)))); \ - __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ - __ret_730; \ +#define vshrn_high_n_u64(__p0_751, __p1_751, __p2_751) __extension__ ({ \ + uint32x4_t __ret_751; \ + uint32x2_t __s0_751 = __p0_751; \ + uint64x2_t __s1_751 = __p1_751; \ + uint32x2_t __rev0_751; __rev0_751 = __builtin_shufflevector(__s0_751, __s0_751, 1, 0); \ + uint64x2_t __rev1_751; __rev1_751 = __builtin_shufflevector(__s1_751, __s1_751, 1, 0); \ + __ret_751 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_751), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_751, __p2_751)))); \ + __ret_751 = __builtin_shufflevector(__ret_751, __ret_751, 3, 2, 1, 0); \ + __ret_751; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u16(__p0_731, __p1_731, __p2_731) __extension__ ({ \ - uint8x16_t __ret_731; \ - uint8x8_t __s0_731 = __p0_731; \ - uint16x8_t __s1_731 = __p1_731; \ - __ret_731 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_731), (uint8x8_t)(vshrn_n_u16(__s1_731, __p2_731)))); \ - __ret_731; \ +#define vshrn_high_n_u16(__p0_752, __p1_752, __p2_752) __extension__ ({ \ + uint8x16_t __ret_752; \ + uint8x8_t __s0_752 = __p0_752; \ + uint16x8_t __s1_752 = __p1_752; \ + __ret_752 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_752), (uint8x8_t)(vshrn_n_u16(__s1_752, __p2_752)))); \ + __ret_752; \ }) #else -#define vshrn_high_n_u16(__p0_732, __p1_732, __p2_732) __extension__ ({ \ - uint8x16_t __ret_732; \ - uint8x8_t __s0_732 = __p0_732; \ - uint16x8_t __s1_732 = __p1_732; \ - uint8x8_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_732 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_732), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_732, __p2_732)))); \ - __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_732; \ +#define 
vshrn_high_n_u16(__p0_753, __p1_753, __p2_753) __extension__ ({ \ + uint8x16_t __ret_753; \ + uint8x8_t __s0_753 = __p0_753; \ + uint16x8_t __s1_753 = __p1_753; \ + uint8x8_t __rev0_753; __rev0_753 = __builtin_shufflevector(__s0_753, __s0_753, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_753; __rev1_753 = __builtin_shufflevector(__s1_753, __s1_753, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_753 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_753), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_753, __p2_753)))); \ + __ret_753 = __builtin_shufflevector(__ret_753, __ret_753, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_753; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s32(__p0_733, __p1_733, __p2_733) __extension__ ({ \ - int16x8_t __ret_733; \ - int16x4_t __s0_733 = __p0_733; \ - int32x4_t __s1_733 = __p1_733; \ - __ret_733 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_733), (int16x4_t)(vshrn_n_s32(__s1_733, __p2_733)))); \ - __ret_733; \ +#define vshrn_high_n_s32(__p0_754, __p1_754, __p2_754) __extension__ ({ \ + int16x8_t __ret_754; \ + int16x4_t __s0_754 = __p0_754; \ + int32x4_t __s1_754 = __p1_754; \ + __ret_754 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_754), (int16x4_t)(vshrn_n_s32(__s1_754, __p2_754)))); \ + __ret_754; \ }) #else -#define vshrn_high_n_s32(__p0_734, __p1_734, __p2_734) __extension__ ({ \ - int16x8_t __ret_734; \ - int16x4_t __s0_734 = __p0_734; \ - int32x4_t __s1_734 = __p1_734; \ - int16x4_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \ - int32x4_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \ - __ret_734 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_734), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_734, __p2_734)))); \ - __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_734; \ +#define vshrn_high_n_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \ + int16x8_t __ret_755; \ + int16x4_t __s0_755 = __p0_755; \ + int32x4_t __s1_755 = __p1_755; \ + int16x4_t __rev0_755; __rev0_755 = __builtin_shufflevector(__s0_755, __s0_755, 3, 2, 1, 0); \ + int32x4_t __rev1_755; __rev1_755 = __builtin_shufflevector(__s1_755, __s1_755, 3, 2, 1, 0); \ + __ret_755 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_755), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_755, __p2_755)))); \ + __ret_755 = __builtin_shufflevector(__ret_755, __ret_755, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_755; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s64(__p0_735, __p1_735, __p2_735) __extension__ ({ \ - int32x4_t __ret_735; \ - int32x2_t __s0_735 = __p0_735; \ - int64x2_t __s1_735 = __p1_735; \ - __ret_735 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_735), (int32x2_t)(vshrn_n_s64(__s1_735, __p2_735)))); \ - __ret_735; \ +#define vshrn_high_n_s64(__p0_756, __p1_756, __p2_756) __extension__ ({ \ + int32x4_t __ret_756; \ + int32x2_t __s0_756 = __p0_756; \ + int64x2_t __s1_756 = __p1_756; \ + __ret_756 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_756), (int32x2_t)(vshrn_n_s64(__s1_756, __p2_756)))); \ + __ret_756; \ }) #else -#define vshrn_high_n_s64(__p0_736, __p1_736, __p2_736) __extension__ ({ \ - int32x4_t __ret_736; \ - int32x2_t __s0_736 = __p0_736; \ - int64x2_t __s1_736 = __p1_736; \ - int32x2_t __rev0_736; __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 1, 0); \ - int64x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ - __ret_736 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_736), 
(int32x2_t)(__noswap_vshrn_n_s64(__rev1_736, __p2_736)))); \ - __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \ - __ret_736; \ +#define vshrn_high_n_s64(__p0_757, __p1_757, __p2_757) __extension__ ({ \ + int32x4_t __ret_757; \ + int32x2_t __s0_757 = __p0_757; \ + int64x2_t __s1_757 = __p1_757; \ + int32x2_t __rev0_757; __rev0_757 = __builtin_shufflevector(__s0_757, __s0_757, 1, 0); \ + int64x2_t __rev1_757; __rev1_757 = __builtin_shufflevector(__s1_757, __s1_757, 1, 0); \ + __ret_757 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_757), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_757, __p2_757)))); \ + __ret_757 = __builtin_shufflevector(__ret_757, __ret_757, 3, 2, 1, 0); \ + __ret_757; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \ - int8x16_t __ret_737; \ - int8x8_t __s0_737 = __p0_737; \ - int16x8_t __s1_737 = __p1_737; \ - __ret_737 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_737), (int8x8_t)(vshrn_n_s16(__s1_737, __p2_737)))); \ - __ret_737; \ +#define vshrn_high_n_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \ + int8x16_t __ret_758; \ + int8x8_t __s0_758 = __p0_758; \ + int16x8_t __s1_758 = __p1_758; \ + __ret_758 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_758), (int8x8_t)(vshrn_n_s16(__s1_758, __p2_758)))); \ + __ret_758; \ }) #else -#define vshrn_high_n_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ - int8x16_t __ret_738; \ - int8x8_t __s0_738 = __p0_738; \ - int16x8_t __s1_738 = __p1_738; \ - int8x8_t __rev0_738; __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_738 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_738), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_738, __p2_738)))); \ - __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_738; \ +#define vshrn_high_n_s16(__p0_759, __p1_759, __p2_759) __extension__ ({ \ + int8x16_t __ret_759; \ + int8x8_t __s0_759 = __p0_759; \ + int16x8_t __s1_759 = __p1_759; \ + int8x8_t __rev0_759; __rev0_759 = __builtin_shufflevector(__s0_759, __s0_759, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_759; __rev1_759 = __builtin_shufflevector(__s1_759, __s1_759, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_759 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_759), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_759, __p2_759)))); \ + __ret_759 = __builtin_shufflevector(__ret_759, __ret_759, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_759; \ }) #endif @@ -58638,34 +60384,34 @@ __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { }) #endif -__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { uint8_t __ret; __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); return __ret; } -__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); return __ret; } -__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); return __ret; } -__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) 
uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { uint16_t __ret; __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -58676,13 +60422,13 @@ __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -58693,13 +60439,13 @@ __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -58710,13 +60456,13 @@ __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -58727,13 +60473,13 @@ __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai 
__attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -58744,13 +60490,13 @@ __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -58760,19 +60506,19 @@ __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { } #endif -__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -58783,13 +60529,13 @@ __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); return __ret; } #else -__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); @@ -58799,13 +60545,13 @@ __ai float64x2_t vsqrtq_f64(float64x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); @@ -58814,19 +60560,19 @@ __ai float32x4_t vsqrtq_f32(float32x4_t __p0) { } #endif -__ai float64x1_t vsqrt_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vsqrt_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) 
__builtin_neon_vsqrt_v((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vsqrt_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vsqrt_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); @@ -59683,24 +61429,24 @@ __ai float32x2_t vsqrt_f32(float32x2_t __p0) { poly128_t __s1 = __p1; \ __builtin_neon_vstrq_p128(__p0, __s1); \ }) -__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); return __ret; } -__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vsubd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __p0 - __p1; return __ret; } #else -__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -59710,19 +61456,19 @@ __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { } #endif -__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; __ret = __p0 - __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); return __ret; } #else -__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint16x8_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -59734,13 +61480,13 @@ __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); return __ret; } #else -__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -59752,13 +61498,13 @@ __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59770,13 +61516,13 @@ __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); return __ret; } #else -__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int16x8_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -59788,13 +61534,13 @@ __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); return __ret; } #else -__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int32x4_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -59806,13 +61552,13 @@ __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); return __ret; } #else -__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int8x16_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59824,13 +61570,13 @@ __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, 
uint8x16_t __p1) { uint16x8_t __ret; __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); return __ret; } #else -__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59841,13 +61587,13 @@ __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -59858,13 +61604,13 @@ __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); return __ret; } #else -__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59875,13 +61621,13 @@ __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); return __ret; } #else -__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59892,13 +61638,13 @@ __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -59909,13 +61655,13 @@ __ai int64x2_t 
vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); return __ret; } #else -__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59926,13 +61672,13 @@ __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = __p0 - vmovl_high_u8(__p1); return __ret; } #else -__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59943,13 +61689,13 @@ __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = __p0 - vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -59960,13 +61706,13 @@ __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = __p0 - vmovl_high_u16(__p1); return __ret; } #else -__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59977,13 +61723,13 @@ __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = __p0 - vmovl_high_s8(__p1); return __ret; } #else -__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -59994,13 +61740,13 @@ __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = __p0 - vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60011,13 +61757,13 @@ __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = __p0 - vmovl_high_s16(__p1); return __ret; } #else -__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60028,13 +61774,13 @@ __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60045,13 +61791,13 @@ __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60062,13 +61808,13 @@ __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else -__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60079,13 +61825,13 @@ __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60096,13 +61842,13 @@ __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60113,13 +61859,13 @@ __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else -__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60130,13 +61876,13 @@ __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60147,13 +61893,13 @@ __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint64x2_t vtrn1q_u64(uint64x2_t 
__p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60164,13 +61910,13 @@ __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60181,13 +61927,13 @@ __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else -__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60198,13 +61944,13 @@ __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60215,13 +61961,13 @@ __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60232,13 +61978,13 @@ __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t 
__p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60249,13 +61995,13 @@ __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60266,13 +62012,13 @@ __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60283,13 +62029,13 @@ __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60300,13 +62046,13 @@ __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60317,13 +62063,13 @@ __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = 
__builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60334,13 +62080,13 @@ __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60351,13 +62097,13 @@ __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60368,13 +62114,13 @@ __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60385,13 +62131,13 @@ __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60402,13 +62148,47 @@ __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} 
+#else +__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60419,13 +62199,13 @@ __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60436,13 +62216,13 @@ __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else -__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60453,13 +62233,13 @@ __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t 
__ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60470,13 +62250,13 @@ __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60487,13 +62267,13 @@ __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else -__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60504,13 +62284,13 @@ __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60521,13 +62301,13 @@ __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60538,13 +62318,13 @@ __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, 
uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60555,13 +62335,13 @@ __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else -__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60572,13 +62352,13 @@ __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60589,13 +62369,13 @@ __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60606,13 +62386,13 @@ __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60623,13 
+62403,13 @@ __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60640,13 +62420,13 @@ __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60657,13 +62437,13 @@ __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60674,13 +62454,13 @@ __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60691,13 +62471,13 @@ __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60708,13 +62488,13 @@ __ai uint16x4_t 
vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60725,13 +62505,13 @@ __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60742,13 +62522,13 @@ __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60759,13 +62539,13 @@ __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60775,19 +62555,53 @@ __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { } #endif -__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 
7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { uint64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60798,13 +62612,13 @@ __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60815,13 +62629,13 @@ __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60831,54 +62645,54 @@ __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { } #endif -__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; __ret = (uint64x1_t) 
__builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); return __ret; } -__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); return __ret; } -__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { +__ai __attribute__((target("neon"))) int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { int8_t __ret; __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); return __ret; } -__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { int32_t __ret; __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); return __ret; } -__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { int64_t __ret; __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); return __ret; } -__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { int16_t __ret; __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60889,13 +62703,13 @@ __ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60906,13 +62720,13 @@ __ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60923,13 +62737,13 @@ __ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60940,13 +62754,13 @@ __ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { int8x8_t __ret; __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -60957,13 +62771,13 @@ __ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -60973,19 +62787,19 @@ __ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { } #endif -__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { int64x1_t __ret; __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { int16x4_t __ret; __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -60996,13 +62810,13 @@ __ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = 
__builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61013,13 +62827,13 @@ __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61030,13 +62844,13 @@ __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else -__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61047,13 +62861,13 @@ __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61064,13 +62878,13 @@ __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61081,13 +62895,13 @@ __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t 
vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else -__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61098,13 +62912,13 @@ __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61115,13 +62929,13 @@ __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61132,13 +62946,13 @@ __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61149,13 +62963,13 @@ __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else -__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); 
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61166,13 +62980,13 @@ __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61183,13 +62997,13 @@ __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61200,13 +63014,13 @@ __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61217,13 +63031,13 @@ __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61234,13 +63048,13 @@ __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 
2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61251,13 +63065,13 @@ __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61268,13 +63082,13 @@ __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61285,13 +63099,13 @@ __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61302,13 +63116,13 @@ __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61319,13 +63133,13 @@ __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61336,13 +63150,13 @@ __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61353,13 +63167,13 @@ __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61370,13 +63184,47 @@ __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61387,13 
+63235,13 @@ __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61404,13 +63252,13 @@ __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); return __ret; } #else -__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61421,13 +63269,13 @@ __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61438,13 +63286,13 @@ __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61455,13 +63303,13 @@ __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); return __ret; } #else -__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { 
uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61472,13 +63320,13 @@ __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61489,13 +63337,13 @@ __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61506,13 +63354,13 @@ __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61523,13 +63371,13 @@ __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); return __ret; } #else -__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61540,13 +63388,13 @@ __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = 
__builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61557,13 +63405,13 @@ __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61574,13 +63422,13 @@ __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61591,13 +63439,13 @@ __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61608,13 +63456,13 @@ __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61625,13 +63473,13 @@ __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 
3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61642,13 +63490,13 @@ __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61659,13 +63507,13 @@ __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61676,13 +63524,13 @@ __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61693,13 +63541,13 @@ __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61710,13 +63558,13 @@ __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai 
int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61727,13 +63575,13 @@ __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61744,13 +63592,47 @@ __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61761,13 +63643,13 @@ __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai 
__attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61778,13 +63660,13 @@ __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else -__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61795,13 +63677,13 @@ __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61812,13 +63694,13 @@ __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61829,13 +63711,13 @@ __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else -__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61846,13 +63728,13 @@ __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { 
+__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61863,13 +63745,13 @@ __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61880,13 +63762,13 @@ __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61897,13 +63779,13 @@ __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else -__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61914,13 +63796,13 @@ __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61931,13 +63813,13 @@ __ai float64x2_t vzip1q_f64(float64x2_t 
__p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61948,13 +63830,13 @@ __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -61965,13 +63847,13 @@ __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61982,13 +63864,13 @@ __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -61999,13 +63881,13 @@ __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62016,13 +63898,13 @@ __ai uint8x8_t vzip1_u8(uint8x8_t __p0, 
uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62033,13 +63915,13 @@ __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62050,13 +63932,13 @@ __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62067,13 +63949,13 @@ __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62084,13 +63966,13 @@ __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62101,13 +63983,13 @@ __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vzip1_s16(int16x4_t 
__p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62118,13 +64000,47 @@ __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62135,13 +64051,13 @@ __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62152,13 +64068,13 @@ __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai 
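// Sketch, not part of the generated header: the index lists passed to
// __builtin_shufflevector above encode ZIP1/ZIP2 directly. For two 4-lane
// inputs a and b, indices 0..3 select lanes of a and 4..7 select lanes of b,
// so {0,4,1,5} interleaves the low halves (ZIP1) and {2,6,3,7} the high
// halves (ZIP2). The same masks with Clang's generic vector extension:
typedef int int32x4_sketch __attribute__((vector_size(16)));
static int32x4_sketch zip1_sketch(int32x4_sketch a, int32x4_sketch b) {
  return __builtin_shufflevector(a, b, 0, 4, 1, 5); // low halves interleaved
}
static int32x4_sketch zip2_sketch(int32x4_sketch a, int32x4_sketch b) {
  return __builtin_shufflevector(a, b, 2, 6, 3, 7); // high halves interleaved
}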
__attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); return __ret; } #else -__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62169,13 +64085,13 @@ __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62186,13 +64102,13 @@ __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62203,13 +64119,13 @@ __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); return __ret; } #else -__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62220,13 +64136,13 @@ __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 
2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62237,13 +64153,13 @@ __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62254,13 +64170,13 @@ __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62271,13 +64187,13 @@ __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); return __ret; } #else -__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62288,13 +64204,13 @@ __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62305,13 +64221,13 @@ __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) 
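// Sketch, not part of the generated header, of the #else branches above:
// __builtin_shufflevector numbers lanes in one fixed order, which on
// big-endian targets does not match the architectural lane order, so each
// big-endian variant reverses its inputs, applies the little-endian mask,
// then reverses the result back:
typedef unsigned uint32x4_be_sketch __attribute__((vector_size(16)));
static uint32x4_be_sketch zip2q_big_endian_sketch(uint32x4_be_sketch a,
                                                  uint32x4_be_sketch b) {
  uint32x4_be_sketch ra = __builtin_shufflevector(a, a, 3, 2, 1, 0); // reverse
  uint32x4_be_sketch rb = __builtin_shufflevector(b, b, 3, 2, 1, 0);
  uint32x4_be_sketch r  = __builtin_shufflevector(ra, rb, 2, 6, 3, 7); // ZIP2
  return __builtin_shufflevector(r, r, 3, 2, 1, 0); // restore lane order
}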
float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62322,13 +64238,13 @@ __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62339,13 +64255,13 @@ __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62356,13 +64272,13 @@ __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62373,13 +64289,13 @@ __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62390,13 +64306,13 @@ __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t 
vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62407,13 +64323,13 @@ __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62424,13 +64340,13 @@ __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -62441,13 +64357,13 @@ __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62458,13 +64374,13 @@ __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62475,13 +64391,13 @@ __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -62491,1996 +64407,14 @@ __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { } #endif -__ai __attribute__((target("aes"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); - return __ret; -} -#else -__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_bf16(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \ - bfloat16x8_t __ret_739; \ - bfloat16x8_t __s0_739 = __p0_739; \ - bfloat16x4_t __s2_739 = __p2_739; \ - __ret_739 = vsetq_lane_bf16(vget_lane_bf16(__s2_739, __p3_739), __s0_739, __p1_739); \ - __ret_739; \ -}) -#else -#define vcopyq_lane_bf16(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \ - bfloat16x8_t __ret_740; \ - bfloat16x8_t __s0_740 = __p0_740; \ - bfloat16x4_t __s2_740 = __p2_740; \ - bfloat16x8_t __rev0_740; __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_740; __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 3, 2, 1, 0); \ - __ret_740 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_740, __p3_740), __rev0_740, __p1_740); \ - __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_740; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_bf16(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \ - bfloat16x4_t __ret_741; \ - bfloat16x4_t __s0_741 = __p0_741; \ - bfloat16x4_t __s2_741 = __p2_741; \ - __ret_741 = vset_lane_bf16(vget_lane_bf16(__s2_741, __p3_741), __s0_741, __p1_741); \ - __ret_741; \ -}) -#else -#define vcopy_lane_bf16(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \ - bfloat16x4_t __ret_742; \ - bfloat16x4_t __s0_742 = __p0_742; \ - bfloat16x4_t __s2_742 = __p2_742; \ - bfloat16x4_t __rev0_742; __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_742; __rev2_742 = 
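// Sketch, not part of the generated header: vmull_p64, removed above, is the
// 64x64 -> 128-bit carry-less (polynomial) multiply behind PMULL. The low 64
// bits of that product, written out bitwise:
static unsigned long long clmul_lo_sketch(unsigned long long a,
                                          unsigned long long b) {
  unsigned long long r = 0;
  for (int i = 0; i < 64; i++)
    if ((b >> i) & 1)
      r ^= a << i; // XOR-accumulate shifted partial products, no carries
  return r;
}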
__builtin_shufflevector(__s2_742, __s2_742, 3, 2, 1, 0); \ - __ret_742 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_742, __p3_742), __rev0_742, __p1_742); \ - __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 3, 2, 1, 0); \ - __ret_742; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_bf16(__p0_743, __p1_743, __p2_743, __p3_743) __extension__ ({ \ - bfloat16x8_t __ret_743; \ - bfloat16x8_t __s0_743 = __p0_743; \ - bfloat16x8_t __s2_743 = __p2_743; \ - __ret_743 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_743, __p3_743), __s0_743, __p1_743); \ - __ret_743; \ -}) -#else -#define vcopyq_laneq_bf16(__p0_744, __p1_744, __p2_744, __p3_744) __extension__ ({ \ - bfloat16x8_t __ret_744; \ - bfloat16x8_t __s0_744 = __p0_744; \ - bfloat16x8_t __s2_744 = __p2_744; \ - bfloat16x8_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_744; __rev2_744 = __builtin_shufflevector(__s2_744, __s2_744, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_744 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_744, __p3_744), __rev0_744, __p1_744); \ - __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_744; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_bf16(__p0_745, __p1_745, __p2_745, __p3_745) __extension__ ({ \ - bfloat16x4_t __ret_745; \ - bfloat16x4_t __s0_745 = __p0_745; \ - bfloat16x8_t __s2_745 = __p2_745; \ - __ret_745 = vset_lane_bf16(vgetq_lane_bf16(__s2_745, __p3_745), __s0_745, __p1_745); \ - __ret_745; \ -}) -#else -#define vcopy_laneq_bf16(__p0_746, __p1_746, __p2_746, __p3_746) __extension__ ({ \ - bfloat16x4_t __ret_746; \ - bfloat16x4_t __s0_746 = __p0_746; \ - bfloat16x8_t __s2_746 = __p2_746; \ - bfloat16x4_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_746; __rev2_746 = __builtin_shufflevector(__s2_746, __s2_746, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_746 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_746, __p3_746), __rev0_746, __p1_746); \ - __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ - __ret_746; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - 
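// Sketch, not part of the generated header: each vcopy..._lane_bf16 macro
// above composes two primitives, read one lane then write one lane:
// vset_lane(vget_lane(src, src_lane), dst, dst_lane). The same idea over
// plain arrays:
static void copy_lane_sketch(unsigned short dst[4], int dst_lane,
                             const unsigned short src[4], int src_lane) {
  dst[dst_lane] = src[src_lane]; // one element moved, all others untouched
}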
-#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = __a64_vcvtq_low_bf16_f32(__p0); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { - 
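// Sketch, not part of the generated header: the vcvt..._bf16_f32 helpers
// removed above narrow f32 lanes to bfloat16, which keeps the top 16 bits of
// the IEEE-754 single encoding (sign, exponent, 7 mantissa bits). A
// truncating scalar sketch; the real conversion rounds rather than truncates:
static unsigned short f32_to_bf16_trunc_sketch(float f) {
  unsigned bits;
  __builtin_memcpy(&bits, &f, sizeof bits); // type-pun without aliasing UB
  return (unsigned short)(bits >> 16);
}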
uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t 
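// Sketch, not part of the generated header: every vreinterpret..._bf16
// helper in this block is a pure bit-cast, as the (type)(value) bodies show:
// the 64- or 128-bit payload is unchanged and only the lane type changes, so
// no instruction is emitted. The equivalent cast between same-size generic
// vectors:
typedef unsigned short u16x4_sketch __attribute__((vector_size(8)));
typedef unsigned int   u32x2_sketch __attribute__((vector_size(8)));
static u32x2_sketch reinterpret_sketch(u16x4_sketch v) {
  return (u32x2_sketch)v; // same 64 bits, new lane type, no code generated
}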
vreinterpretq_bf16_f16(float16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -#define vdotq_laneq_u32(__p0_747, __p1_747, __p2_747, __p3_747) __extension__ ({ \ - uint32x4_t __ret_747; \ - uint32x4_t __s0_747 = __p0_747; \ - uint8x16_t __s1_747 = __p1_747; \ - uint8x16_t __s2_747 = __p2_747; \ -uint8x16_t __reint_747 = __s2_747; \ -uint32x4_t __reint1_747 = splatq_laneq_u32(*(uint32x4_t *) &__reint_747, __p3_747); \ - __ret_747 = vdotq_u32(__s0_747, __s1_747, *(uint8x16_t *) &__reint1_747); \ - __ret_747; \ -}) -#else -#define vdotq_laneq_u32(__p0_748, __p1_748, __p2_748, __p3_748) __extension__ ({ \ - uint32x4_t __ret_748; \ - uint32x4_t __s0_748 = __p0_748; \ - uint8x16_t __s1_748 
= __p1_748; \ - uint8x16_t __s2_748 = __p2_748; \ - uint32x4_t __rev0_748; __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \ - uint8x16_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_748; __rev2_748 = __builtin_shufflevector(__s2_748, __s2_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_748 = __rev2_748; \ -uint32x4_t __reint1_748 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_748, __p3_748); \ - __ret_748 = __noswap_vdotq_u32(__rev0_748, __rev1_748, *(uint8x16_t *) &__reint1_748); \ - __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 3, 2, 1, 0); \ - __ret_748; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdotq_laneq_s32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \ - int32x4_t __ret_749; \ - int32x4_t __s0_749 = __p0_749; \ - int8x16_t __s1_749 = __p1_749; \ - int8x16_t __s2_749 = __p2_749; \ -int8x16_t __reint_749 = __s2_749; \ -int32x4_t __reint1_749 = splatq_laneq_s32(*(int32x4_t *) &__reint_749, __p3_749); \ - __ret_749 = vdotq_s32(__s0_749, __s1_749, *(int8x16_t *) &__reint1_749); \ - __ret_749; \ -}) -#else -#define vdotq_laneq_s32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \ - int32x4_t __ret_750; \ - int32x4_t __s0_750 = __p0_750; \ - int8x16_t __s1_750 = __p1_750; \ - int8x16_t __s2_750 = __p2_750; \ - int32x4_t __rev0_750; __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 3, 2, 1, 0); \ - int8x16_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_750; __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_750 = __rev2_750; \ -int32x4_t __reint1_750 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_750, __p3_750); \ - __ret_750 = __noswap_vdotq_s32(__rev0_750, __rev1_750, *(int8x16_t *) &__reint1_750); \ - __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \ - __ret_750; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdot_laneq_u32(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \ - uint32x2_t __ret_751; \ - uint32x2_t __s0_751 = __p0_751; \ - uint8x8_t __s1_751 = __p1_751; \ - uint8x16_t __s2_751 = __p2_751; \ -uint8x16_t __reint_751 = __s2_751; \ -uint32x2_t __reint1_751 = splat_laneq_u32(*(uint32x4_t *) &__reint_751, __p3_751); \ - __ret_751 = vdot_u32(__s0_751, __s1_751, *(uint8x8_t *) &__reint1_751); \ - __ret_751; \ -}) -#else -#define vdot_laneq_u32(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \ - uint32x2_t __ret_752; \ - uint32x2_t __s0_752 = __p0_752; \ - uint8x8_t __s1_752 = __p1_752; \ - uint8x16_t __s2_752 = __p2_752; \ - uint32x2_t __rev0_752; __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 1, 0); \ - uint8x8_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_752; __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_752 = __rev2_752; \ -uint32x2_t __reint1_752 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_752, __p3_752); \ - __ret_752 = __noswap_vdot_u32(__rev0_752, __rev1_752, *(uint8x8_t *) &__reint1_752); \ - __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 1, 0); \ - __ret_752; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdot_laneq_s32(__p0_753, __p1_753, 
__p2_753, __p3_753) __extension__ ({ \ - int32x2_t __ret_753; \ - int32x2_t __s0_753 = __p0_753; \ - int8x8_t __s1_753 = __p1_753; \ - int8x16_t __s2_753 = __p2_753; \ -int8x16_t __reint_753 = __s2_753; \ -int32x2_t __reint1_753 = splat_laneq_s32(*(int32x4_t *) &__reint_753, __p3_753); \ - __ret_753 = vdot_s32(__s0_753, __s1_753, *(int8x8_t *) &__reint1_753); \ - __ret_753; \ -}) -#else -#define vdot_laneq_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \ - int32x2_t __ret_754; \ - int32x2_t __s0_754 = __p0_754; \ - int8x8_t __s1_754 = __p1_754; \ - int8x16_t __s2_754 = __p2_754; \ - int32x2_t __rev0_754; __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \ - int8x8_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_754; __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_754 = __rev2_754; \ -int32x2_t __reint1_754 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_754, __p3_754); \ - __ret_754 = __noswap_vdot_s32(__rev0_754, __rev1_754, *(int8x8_t *) &__reint1_754); \ - __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \ - __ret_754; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) 
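// Sketch, not part of the generated header: the vdot..._laneq_... macros
// removed above implement UDOT/SDOT with a lane selector: one 32-bit group
// (four bytes) of the last operand is splatted via splat..._laneq_..., then
// each 32-bit output lane accumulates a 4-way byte dot product. One output
// lane, scalar:
static unsigned udot_lane_sketch(unsigned acc,
                                 const unsigned char a[4],
                                 const unsigned char b[4]) {
  return acc + a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3];
}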
__builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t 
__ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = 
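// Sketch, not part of the generated header: the fp16fml vfmlal/vfmlsl
// intrinsics removed above are widening fused multiply-accumulates. Pairs of
// f16 elements (the _low/_high suffix picks the vector half) are multiplied
// into f32 and added to, or for the fmlsl forms subtracted from, an f32
// accumulator. Per-element sketch, assuming _Float16 is available:
static float fmlal_elem_sketch(float acc, _Float16 a, _Float16 b) {
  return acc + (float)a * (float)b; // the product of two f16 is exact in f32
}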
(float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t 
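// Sketch, not part of the generated header: vdivq_f16/vdiv_f16 above lower
// to the plain vector '/' operator (visible in their bodies), gated on the
// fullfp16 target feature. With Clang's vector extension and _Float16:
typedef _Float16 f16x4_sketch __attribute__((vector_size(8)));
static f16x4_sketch div_f16_sketch(f16x4_sketch a, f16x4_sketch b) {
  return a / b; // element-wise half-precision division
}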
__s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) -#else -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) -#else -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ 
- float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) -#else -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) -#else -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - 
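// Sketch, not part of the generated header: the vfma..._lane/_laneq/_n macro
// families above all reduce to "multiply-accumulate against one broadcast
// scalar": _lane/_laneq select it out of a vector, _n takes it directly.
// Array sketch for four lanes:
static void fma_lane_sketch(float acc[4], const float x[4],
                            const float lanes[4], int lane) {
  for (int i = 0; i < 4; i++)
    acc[i] += x[i] * lanes[lane]; // lanes[lane] splatted across all outputs
}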
float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsh_lane_f16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \ - float16_t __ret_755; \ - float16_t __s0_755 = __p0_755; \ - float16_t __s1_755 = __p1_755; \ - float16x4_t __s2_755 = __p2_755; \ - __ret_755 = vfmah_lane_f16(__s0_755, -__s1_755, __s2_755, __p3_755); \ - __ret_755; \ -}) -#else -#define vfmsh_lane_f16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \ - float16_t __ret_756; \ - float16_t __s0_756 = __p0_756; \ - float16_t __s1_756 = __p1_756; \ - float16x4_t __s2_756 = __p2_756; \ - float16x4_t __rev2_756; __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \ - __ret_756 = __noswap_vfmah_lane_f16(__s0_756, -__s1_756, __rev2_756, __p3_756); \ - __ret_756; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f16(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \ - float16x8_t __ret_757; \ - float16x8_t __s0_757 = __p0_757; \ - float16x8_t __s1_757 = __p1_757; \ - float16x4_t __s2_757 = __p2_757; \ - __ret_757 = vfmaq_lane_f16(__s0_757, -__s1_757, __s2_757, __p3_757); \ - __ret_757; \ -}) -#else -#define vfmsq_lane_f16(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \ - float16x8_t __ret_758; \ - float16x8_t __s0_758 = __p0_758; \ - float16x8_t __s1_758 = __p1_758; \ - float16x4_t __s2_758 = __p2_758; \ - float16x8_t __rev0_758; __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_758; __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 3, 2, 1, 0); \ - __ret_758 = __noswap_vfmaq_lane_f16(__rev0_758, -__rev1_758, __rev2_758, __p3_758); \ - __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_758; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \ - float16x4_t __ret_759; \ - float16x4_t __s0_759 = __p0_759; \ - float16x4_t __s1_759 = __p1_759; \ - float16x4_t __s2_759 = __p2_759; \ - __ret_759 = vfma_lane_f16(__s0_759, -__s1_759, __s2_759, __p3_759); \ - __ret_759; \ -}) -#else -#define vfms_lane_f16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ - float16x4_t __ret_760; \ - float16x4_t __s0_760 = __p0_760; \ - float16x4_t __s1_760 = __p1_760; \ - float16x4_t __s2_760 = __p2_760; \ - float16x4_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \ - float16x4_t __rev1_760; __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \ - float16x4_t __rev2_760; __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \ - __ret_760 = __noswap_vfma_lane_f16(__rev0_760, -__rev1_760, __rev2_760, __p3_760); \ - __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \ - __ret_760; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsh_laneq_f16(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ - float16_t __ret_761; \ - float16_t __s0_761 = __p0_761; \ - float16_t __s1_761 = __p1_761; \ - float16x8_t __s2_761 = __p2_761; \ - __ret_761 = vfmah_laneq_f16(__s0_761, -__s1_761, __s2_761, __p3_761); \ - __ret_761; \ -}) -#else -#define vfmsh_laneq_f16(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \ - float16_t __ret_762; \ - float16_t __s0_762 = __p0_762; \ - float16_t __s1_762 = __p1_762; \ - float16x8_t __s2_762 = __p2_762; \ - float16x8_t __rev2_762; __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_762 = __noswap_vfmah_laneq_f16(__s0_762, -__s1_762, __rev2_762, __p3_762); \ - __ret_762; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ - float16x8_t __ret_763; \ - float16x8_t __s0_763 = __p0_763; \ - float16x8_t __s1_763 = __p1_763; \ - float16x8_t __s2_763 = __p2_763; \ - __ret_763 = vfmaq_laneq_f16(__s0_763, -__s1_763, __s2_763, __p3_763); \ - __ret_763; \ -}) -#else -#define vfmsq_laneq_f16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ - float16x8_t __ret_764; \ - float16x8_t __s0_764 = __p0_764; \ - float16x8_t __s1_764 = __p1_764; \ - float16x8_t __s2_764 = __p2_764; \ - float16x8_t __rev0_764; __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_764; __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_764 = __noswap_vfmaq_laneq_f16(__rev0_764, -__rev1_764, __rev2_764, __p3_764); \ - __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_764; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f16(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ - float16x4_t __ret_765; \ - float16x4_t __s0_765 = __p0_765; \ - float16x4_t __s1_765 = __p1_765; \ - float16x8_t __s2_765 = __p2_765; \ - __ret_765 = vfma_laneq_f16(__s0_765, -__s1_765, __s2_765, __p3_765); \ - __ret_765; \ -}) -#else -#define vfms_laneq_f16(__p0_766, __p1_766, __p2_766, __p3_766) __extension__ ({ \ - float16x4_t __ret_766; \ - float16x4_t __s0_766 = __p0_766; \ - float16x4_t __s1_766 = __p1_766; \ - float16x8_t __s2_766 = __p2_766; \ - float16x4_t __rev0_766; __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 3, 2, 1, 0); \ - float16x4_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \ - float16x8_t __rev2_766; __rev2_766 = __builtin_shufflevector(__s2_766, __s2_766, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_766 = __noswap_vfma_laneq_f16(__rev0_766, -__rev1_766, __rev2_766, __p3_766); \ - __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \ - __ret_766; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vmaxnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vmaxnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vmaxvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vmaxv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vminnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - 
-#ifdef __LITTLE_ENDIAN__ -#define vminnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vminnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vminvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vminv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \ - float16x8_t __ret_767; \ - float16x8_t __s0_767 = __p0_767; \ - float16x8_t __s1_767 = __p1_767; \ - __ret_767 = __s0_767 * splatq_laneq_f16(__s1_767, __p2_767); \ - __ret_767; \ -}) -#else -#define vmulq_laneq_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \ - float16x8_t __ret_768; \ - float16x8_t __s0_768 = __p0_768; \ - float16x8_t __s1_768 = __p1_768; \ - float16x8_t __rev0_768; __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_768 = __rev0_768 * __noswap_splatq_laneq_f16(__rev1_768, __p2_768); \ - __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_768; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f16(__p0_769, __p1_769, __p2_769) __extension__ ({ \ - float16x4_t __ret_769; \ - float16x4_t __s0_769 = __p0_769; \ - float16x8_t __s1_769 = __p1_769; \ - __ret_769 = __s0_769 * splat_laneq_f16(__s1_769, __p2_769); \ - __ret_769; \ -}) -#else -#define vmul_laneq_f16(__p0_770, __p1_770, __p2_770) __extension__ ({ \ - float16x4_t __ret_770; \ - float16x4_t __s0_770 = __p0_770; \ - float16x8_t __s1_770 = __p1_770; \ - float16x4_t __rev0_770; __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \ - float16x8_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_770 = __rev0_770 * __noswap_splat_laneq_f16(__rev1_770, __p2_770); \ - __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \ - __ret_770; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t 
__p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f16(__p0_771, __p1_771, __p2_771) __extension__ ({ \ - float16x8_t __ret_771; \ - float16x8_t __s0_771 = __p0_771; \ - float16x4_t __s1_771 = __p1_771; \ - __ret_771 = vmulxq_f16(__s0_771, splatq_lane_f16(__s1_771, __p2_771)); \ - __ret_771; \ -}) -#else -#define vmulxq_lane_f16(__p0_772, __p1_772, __p2_772) __extension__ ({ \ - float16x8_t __ret_772; \ - float16x8_t __s0_772 = __p0_772; \ - float16x4_t __s1_772 = __p1_772; \ - float16x8_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \ - __ret_772 = __noswap_vmulxq_f16(__rev0_772, __noswap_splatq_lane_f16(__rev1_772, __p2_772)); \ - __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_772; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f16(__p0_773, __p1_773, __p2_773) __extension__ ({ \ - float16x4_t __ret_773; \ - float16x4_t __s0_773 = __p0_773; \ - float16x4_t __s1_773 = __p1_773; \ - __ret_773 = vmulx_f16(__s0_773, splat_lane_f16(__s1_773, __p2_773)); \ - __ret_773; \ -}) -#else -#define vmulx_lane_f16(__p0_774, __p1_774, __p2_774) __extension__ ({ \ - float16x4_t __ret_774; \ - float16x4_t __s0_774 = __p0_774; \ - 
float16x4_t __s1_774 = __p1_774; \ - float16x4_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \ - float16x4_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \ - __ret_774 = __noswap_vmulx_f16(__rev0_774, __noswap_splat_lane_f16(__rev1_774, __p2_774)); \ - __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \ - __ret_774; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f16(__p0_775, __p1_775, __p2_775) __extension__ ({ \ - float16x8_t __ret_775; \ - float16x8_t __s0_775 = __p0_775; \ - float16x8_t __s1_775 = __p1_775; \ - __ret_775 = vmulxq_f16(__s0_775, splatq_laneq_f16(__s1_775, __p2_775)); \ - __ret_775; \ -}) -#else -#define vmulxq_laneq_f16(__p0_776, __p1_776, __p2_776) __extension__ ({ \ - float16x8_t __ret_776; \ - float16x8_t __s0_776 = __p0_776; \ - float16x8_t __s1_776 = __p1_776; \ - float16x8_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_776 = __noswap_vmulxq_f16(__rev0_776, __noswap_splatq_laneq_f16(__rev1_776, __p2_776)); \ - __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_776; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f16(__p0_777, __p1_777, __p2_777) __extension__ ({ \ - float16x4_t __ret_777; \ - float16x4_t __s0_777 = __p0_777; \ - float16x8_t __s1_777 = __p1_777; \ - __ret_777 = vmulx_f16(__s0_777, splat_laneq_f16(__s1_777, __p2_777)); \ - __ret_777; \ -}) -#else -#define vmulx_laneq_f16(__p0_778, __p1_778, __p2_778) __extension__ ({ \ - float16x4_t __ret_778; \ - float16x4_t __s0_778 = __p0_778; \ - float16x8_t __s1_778 = __p1_778; \ - float16x4_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \ - float16x8_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_778 = __noswap_vmulx_f16(__rev0_778, __noswap_splat_laneq_f16(__rev1_778, __p2_778)); \ - __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \ - __ret_778; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ - __ret; \ -}) -#else -#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ - __ret; \ -}) -#else -#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) 
__builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - 
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 
11); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64491,13 +64425,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64507,110 +64441,6 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, } #endif -#ifdef __LITTLE_ENDIAN__ -#define vsudotq_laneq_s32(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ - int32x4_t __ret_779; \ - int32x4_t __s0_779 = __p0_779; \ - int8x16_t __s1_779 = __p1_779; \ - uint8x16_t __s2_779 = __p2_779; \ -uint8x16_t __reint_779 = __s2_779; \ - __ret_779 = vusdotq_s32(__s0_779, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_779, __p3_779)), __s1_779); \ - __ret_779; \ -}) -#else -#define vsudotq_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ - int32x4_t __ret_780; \ - int32x4_t __s0_780 = __p0_780; \ - int8x16_t __s1_780 = __p1_780; \ - uint8x16_t __s2_780 = __p2_780; \ - int32x4_t __rev0_780; __rev0_780 = 
__builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \ - int8x16_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_780; __rev2_780 = __builtin_shufflevector(__s2_780, __s2_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_780 = __rev2_780; \ - __ret_780 = __noswap_vusdotq_s32(__rev0_780, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_780, __p3_780)), __rev1_780); \ - __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \ - __ret_780; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsudot_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ - int32x2_t __ret_781; \ - int32x2_t __s0_781 = __p0_781; \ - int8x8_t __s1_781 = __p1_781; \ - uint8x16_t __s2_781 = __p2_781; \ -uint8x16_t __reint_781 = __s2_781; \ - __ret_781 = vusdot_s32(__s0_781, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_781, __p3_781)), __s1_781); \ - __ret_781; \ -}) -#else -#define vsudot_laneq_s32(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ - int32x2_t __ret_782; \ - int32x2_t __s0_782 = __p0_782; \ - int8x8_t __s1_782 = __p1_782; \ - uint8x16_t __s2_782 = __p2_782; \ - int32x2_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \ - int8x8_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_782; __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_782 = __rev2_782; \ - __ret_782 = __noswap_vusdot_s32(__rev0_782, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_782, __p3_782)), __rev1_782); \ - __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 1, 0); \ - __ret_782; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdotq_laneq_s32(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ - int32x4_t __ret_783; \ - int32x4_t __s0_783 = __p0_783; \ - uint8x16_t __s1_783 = __p1_783; \ - int8x16_t __s2_783 = __p2_783; \ -int8x16_t __reint_783 = __s2_783; \ - __ret_783 = vusdotq_s32(__s0_783, __s1_783, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_783, __p3_783))); \ - __ret_783; \ -}) -#else -#define vusdotq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ - int32x4_t __ret_784; \ - int32x4_t __s0_784 = __p0_784; \ - uint8x16_t __s1_784 = __p1_784; \ - int8x16_t __s2_784 = __p2_784; \ - int32x4_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \ - uint8x16_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_784; __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_784 = __rev2_784; \ - __ret_784 = __noswap_vusdotq_s32(__rev0_784, __rev1_784, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_784, __p3_784))); \ - __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 3, 2, 1, 0); \ - __ret_784; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdot_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \ - int32x2_t __ret_785; \ - int32x2_t __s0_785 = __p0_785; \ - uint8x8_t __s1_785 = __p1_785; \ - int8x16_t __s2_785 = __p2_785; \ -int8x16_t __reint_785 = __s2_785; \ - __ret_785 = vusdot_s32(__s0_785, __s1_785, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_785, 
__p3_785))); \ - __ret_785; \ -}) -#else -#define vusdot_laneq_s32(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ - int32x2_t __ret_786; \ - int32x2_t __s0_786 = __p0_786; \ - uint8x8_t __s1_786 = __p1_786; \ - int8x16_t __s2_786 = __p2_786; \ - int32x2_t __rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \ - uint8x8_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_786; __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_786 = __rev2_786; \ - __ret_786 = __noswap_vusdot_s32(__rev0_786, __rev1_786, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_786, __p3_786))); \ - __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 1, 0); \ - __ret_786; \ -}) -#endif - #define vldap1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ poly64x1_t __ret; \ poly64x1_t __s1 = __p1; \ @@ -64776,13 +64606,13 @@ int8x16_t __reint_786 = __rev2_786; \ __builtin_neon_vstl1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \ }) #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64794,13 +64624,13 @@ __ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64812,13 +64642,13 @@ __ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai 
__attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -64830,13 +64660,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else -__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64848,13 +64678,13 @@ __ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64866,13 +64696,13 @@ __ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64884,13 +64714,13 @@ __ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 35); return __ret; } #else -__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -64902,13 +64732,13 @@ __ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64920,13 +64750,13 @@ __ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64938,13 +64768,13 @@ __ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64956,13 +64786,13 @@ __ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) 
uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -64974,13 +64804,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else -__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64992,13 +64822,13 @@ __ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -65010,13 +64840,13 @@ __ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65028,13 +64858,13 @@ __ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x #endif #ifdef 
__LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else -__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65046,13 +64876,13 @@ __ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -65064,13 +64894,13 @@ __ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65081,13 +64911,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65099,13 +64929,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, u #endif #ifdef 
__LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65117,13 +64947,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65134,13 +64964,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65173,13 +65003,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65191,13 +65021,13 @@ __ai 
__attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65209,13 +65039,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65323,13 +65153,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uin #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65340,13 +65170,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint3 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65356,394 +65186,394 @@ __ai 
__attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, ui } #endif -__ai __attribute__((target("v8.1a"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); return __ret; } -__ai __attribute__((target("v8.1a"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_lane_s32(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ - int32_t __ret_787; \ - int32_t __s0_787 = __p0_787; \ - int32_t __s1_787 = __p1_787; \ - int32x2_t __s2_787 = __p2_787; \ - __ret_787 = vqrdmlahs_s32(__s0_787, __s1_787, vget_lane_s32(__s2_787, __p3_787)); \ - __ret_787; \ +#define vqrdmlahs_lane_s32(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ + int32_t __ret_760; \ + int32_t __s0_760 = __p0_760; \ + int32_t __s1_760 = __p1_760; \ + int32x2_t __s2_760 = __p2_760; \ + __ret_760 = vqrdmlahs_s32(__s0_760, __s1_760, vget_lane_s32(__s2_760, __p3_760)); \ + __ret_760; \ }) #else -#define vqrdmlahs_lane_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ - int32_t __ret_788; \ - int32_t __s0_788 = __p0_788; \ - int32_t __s1_788 = __p1_788; \ - int32x2_t __s2_788 = __p2_788; \ - int32x2_t __rev2_788; __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 1, 0); \ - __ret_788 = vqrdmlahs_s32(__s0_788, __s1_788, __noswap_vget_lane_s32(__rev2_788, __p3_788)); \ - __ret_788; \ +#define vqrdmlahs_lane_s32(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ + int32_t __ret_761; \ + int32_t __s0_761 = __p0_761; \ + int32_t __s1_761 = __p1_761; \ + int32x2_t __s2_761 = __p2_761; \ + int32x2_t __rev2_761; __rev2_761 = __builtin_shufflevector(__s2_761, __s2_761, 1, 0); \ + __ret_761 = vqrdmlahs_s32(__s0_761, __s1_761, __noswap_vget_lane_s32(__rev2_761, __p3_761)); \ + __ret_761; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_lane_s16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ - int16_t __ret_789; \ - int16_t __s0_789 = __p0_789; \ - int16_t __s1_789 = __p1_789; \ - int16x4_t __s2_789 = __p2_789; \ - __ret_789 = vqrdmlahh_s16(__s0_789, __s1_789, vget_lane_s16(__s2_789, __p3_789)); \ - __ret_789; \ +#define vqrdmlahh_lane_s16(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \ + int16_t __ret_762; \ + int16_t __s0_762 = __p0_762; \ + int16_t __s1_762 = __p1_762; \ + int16x4_t __s2_762 = __p2_762; \ + __ret_762 = vqrdmlahh_s16(__s0_762, __s1_762, vget_lane_s16(__s2_762, __p3_762)); \ + __ret_762; \ }) #else -#define vqrdmlahh_lane_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ - int16_t __ret_790; \ - int16_t __s0_790 = __p0_790; \ - int16_t __s1_790 = __p1_790; \ - int16x4_t __s2_790 = __p2_790; \ - int16x4_t __rev2_790; __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 3, 2, 1, 0); \ - __ret_790 = vqrdmlahh_s16(__s0_790, __s1_790, __noswap_vget_lane_s16(__rev2_790, __p3_790)); \ - __ret_790; \ +#define vqrdmlahh_lane_s16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ + int16_t __ret_763; \ + int16_t __s0_763 = __p0_763; \ + int16_t __s1_763 = __p1_763; \ + int16x4_t __s2_763 = __p2_763; \ + int16x4_t __rev2_763; __rev2_763 = 
__builtin_shufflevector(__s2_763, __s2_763, 3, 2, 1, 0); \ + __ret_763 = vqrdmlahh_s16(__s0_763, __s1_763, __noswap_vget_lane_s16(__rev2_763, __p3_763)); \ + __ret_763; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_laneq_s32(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ - int32_t __ret_791; \ - int32_t __s0_791 = __p0_791; \ - int32_t __s1_791 = __p1_791; \ - int32x4_t __s2_791 = __p2_791; \ - __ret_791 = vqrdmlahs_s32(__s0_791, __s1_791, vgetq_lane_s32(__s2_791, __p3_791)); \ - __ret_791; \ +#define vqrdmlahs_laneq_s32(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ + int32_t __ret_764; \ + int32_t __s0_764 = __p0_764; \ + int32_t __s1_764 = __p1_764; \ + int32x4_t __s2_764 = __p2_764; \ + __ret_764 = vqrdmlahs_s32(__s0_764, __s1_764, vgetq_lane_s32(__s2_764, __p3_764)); \ + __ret_764; \ }) #else -#define vqrdmlahs_laneq_s32(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ - int32_t __ret_792; \ - int32_t __s0_792 = __p0_792; \ - int32_t __s1_792 = __p1_792; \ - int32x4_t __s2_792 = __p2_792; \ - int32x4_t __rev2_792; __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 3, 2, 1, 0); \ - __ret_792 = vqrdmlahs_s32(__s0_792, __s1_792, __noswap_vgetq_lane_s32(__rev2_792, __p3_792)); \ - __ret_792; \ +#define vqrdmlahs_laneq_s32(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ + int32_t __ret_765; \ + int32_t __s0_765 = __p0_765; \ + int32_t __s1_765 = __p1_765; \ + int32x4_t __s2_765 = __p2_765; \ + int32x4_t __rev2_765; __rev2_765 = __builtin_shufflevector(__s2_765, __s2_765, 3, 2, 1, 0); \ + __ret_765 = vqrdmlahs_s32(__s0_765, __s1_765, __noswap_vgetq_lane_s32(__rev2_765, __p3_765)); \ + __ret_765; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_laneq_s16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ - int16_t __ret_793; \ - int16_t __s0_793 = __p0_793; \ - int16_t __s1_793 = __p1_793; \ - int16x8_t __s2_793 = __p2_793; \ - __ret_793 = vqrdmlahh_s16(__s0_793, __s1_793, vgetq_lane_s16(__s2_793, __p3_793)); \ - __ret_793; \ +#define vqrdmlahh_laneq_s16(__p0_766, __p1_766, __p2_766, __p3_766) __extension__ ({ \ + int16_t __ret_766; \ + int16_t __s0_766 = __p0_766; \ + int16_t __s1_766 = __p1_766; \ + int16x8_t __s2_766 = __p2_766; \ + __ret_766 = vqrdmlahh_s16(__s0_766, __s1_766, vgetq_lane_s16(__s2_766, __p3_766)); \ + __ret_766; \ }) #else -#define vqrdmlahh_laneq_s16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ - int16_t __ret_794; \ - int16_t __s0_794 = __p0_794; \ - int16_t __s1_794 = __p1_794; \ - int16x8_t __s2_794 = __p2_794; \ - int16x8_t __rev2_794; __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_794 = vqrdmlahh_s16(__s0_794, __s1_794, __noswap_vgetq_lane_s16(__rev2_794, __p3_794)); \ - __ret_794; \ +#define vqrdmlahh_laneq_s16(__p0_767, __p1_767, __p2_767, __p3_767) __extension__ ({ \ + int16_t __ret_767; \ + int16_t __s0_767 = __p0_767; \ + int16_t __s1_767 = __p1_767; \ + int16x8_t __s2_767 = __p2_767; \ + int16x8_t __rev2_767; __rev2_767 = __builtin_shufflevector(__s2_767, __s2_767, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_767 = vqrdmlahh_s16(__s0_767, __s1_767, __noswap_vgetq_lane_s16(__rev2_767, __p3_767)); \ + __ret_767; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s32(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ - int32x4_t __ret_795; \ - int32x4_t __s0_795 = __p0_795; \ - int32x4_t __s1_795 = __p1_795; \ - int32x4_t __s2_795 = __p2_795; \ - __ret_795 = vqrdmlahq_s32(__s0_795, __s1_795, splatq_laneq_s32(__s2_795, 
__p3_795)); \ - __ret_795; \ +#define vqrdmlahq_laneq_s32(__p0_768, __p1_768, __p2_768, __p3_768) __extension__ ({ \ + int32x4_t __ret_768; \ + int32x4_t __s0_768 = __p0_768; \ + int32x4_t __s1_768 = __p1_768; \ + int32x4_t __s2_768 = __p2_768; \ + __ret_768 = vqrdmlahq_s32(__s0_768, __s1_768, splatq_laneq_s32(__s2_768, __p3_768)); \ + __ret_768; \ }) #else -#define vqrdmlahq_laneq_s32(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ - int32x4_t __ret_796; \ - int32x4_t __s0_796 = __p0_796; \ - int32x4_t __s1_796 = __p1_796; \ - int32x4_t __s2_796 = __p2_796; \ - int32x4_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \ - int32x4_t __rev1_796; __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \ - int32x4_t __rev2_796; __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \ - __ret_796 = __noswap_vqrdmlahq_s32(__rev0_796, __rev1_796, __noswap_splatq_laneq_s32(__rev2_796, __p3_796)); \ - __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 3, 2, 1, 0); \ - __ret_796; \ +#define vqrdmlahq_laneq_s32(__p0_769, __p1_769, __p2_769, __p3_769) __extension__ ({ \ + int32x4_t __ret_769; \ + int32x4_t __s0_769 = __p0_769; \ + int32x4_t __s1_769 = __p1_769; \ + int32x4_t __s2_769 = __p2_769; \ + int32x4_t __rev0_769; __rev0_769 = __builtin_shufflevector(__s0_769, __s0_769, 3, 2, 1, 0); \ + int32x4_t __rev1_769; __rev1_769 = __builtin_shufflevector(__s1_769, __s1_769, 3, 2, 1, 0); \ + int32x4_t __rev2_769; __rev2_769 = __builtin_shufflevector(__s2_769, __s2_769, 3, 2, 1, 0); \ + __ret_769 = __noswap_vqrdmlahq_s32(__rev0_769, __rev1_769, __noswap_splatq_laneq_s32(__rev2_769, __p3_769)); \ + __ret_769 = __builtin_shufflevector(__ret_769, __ret_769, 3, 2, 1, 0); \ + __ret_769; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ - int16x8_t __ret_797; \ - int16x8_t __s0_797 = __p0_797; \ - int16x8_t __s1_797 = __p1_797; \ - int16x8_t __s2_797 = __p2_797; \ - __ret_797 = vqrdmlahq_s16(__s0_797, __s1_797, splatq_laneq_s16(__s2_797, __p3_797)); \ - __ret_797; \ +#define vqrdmlahq_laneq_s16(__p0_770, __p1_770, __p2_770, __p3_770) __extension__ ({ \ + int16x8_t __ret_770; \ + int16x8_t __s0_770 = __p0_770; \ + int16x8_t __s1_770 = __p1_770; \ + int16x8_t __s2_770 = __p2_770; \ + __ret_770 = vqrdmlahq_s16(__s0_770, __s1_770, splatq_laneq_s16(__s2_770, __p3_770)); \ + __ret_770; \ }) #else -#define vqrdmlahq_laneq_s16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ - int16x8_t __ret_798; \ - int16x8_t __s0_798 = __p0_798; \ - int16x8_t __s1_798 = __p1_798; \ - int16x8_t __s2_798 = __p2_798; \ - int16x8_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_798 = __noswap_vqrdmlahq_s16(__rev0_798, __rev1_798, __noswap_splatq_laneq_s16(__rev2_798, __p3_798)); \ - __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_798; \ +#define vqrdmlahq_laneq_s16(__p0_771, __p1_771, __p2_771, __p3_771) __extension__ ({ \ + int16x8_t __ret_771; \ + int16x8_t __s0_771 = __p0_771; \ + int16x8_t __s1_771 = __p1_771; \ + int16x8_t __s2_771 = __p2_771; \ + int16x8_t __rev0_771; __rev0_771 = __builtin_shufflevector(__s0_771, __s0_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
int16x8_t __rev1_771; __rev1_771 = __builtin_shufflevector(__s1_771, __s1_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_771; __rev2_771 = __builtin_shufflevector(__s2_771, __s2_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_771 = __noswap_vqrdmlahq_s16(__rev0_771, __rev1_771, __noswap_splatq_laneq_s16(__rev2_771, __p3_771)); \ + __ret_771 = __builtin_shufflevector(__ret_771, __ret_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_771; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s32(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ - int32x2_t __ret_799; \ - int32x2_t __s0_799 = __p0_799; \ - int32x2_t __s1_799 = __p1_799; \ - int32x4_t __s2_799 = __p2_799; \ - __ret_799 = vqrdmlah_s32(__s0_799, __s1_799, splat_laneq_s32(__s2_799, __p3_799)); \ - __ret_799; \ +#define vqrdmlah_laneq_s32(__p0_772, __p1_772, __p2_772, __p3_772) __extension__ ({ \ + int32x2_t __ret_772; \ + int32x2_t __s0_772 = __p0_772; \ + int32x2_t __s1_772 = __p1_772; \ + int32x4_t __s2_772 = __p2_772; \ + __ret_772 = vqrdmlah_s32(__s0_772, __s1_772, splat_laneq_s32(__s2_772, __p3_772)); \ + __ret_772; \ }) #else -#define vqrdmlah_laneq_s32(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ - int32x2_t __ret_800; \ - int32x2_t __s0_800 = __p0_800; \ - int32x2_t __s1_800 = __p1_800; \ - int32x4_t __s2_800 = __p2_800; \ - int32x2_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \ - int32x2_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \ - int32x4_t __rev2_800; __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \ - __ret_800 = __noswap_vqrdmlah_s32(__rev0_800, __rev1_800, __noswap_splat_laneq_s32(__rev2_800, __p3_800)); \ - __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \ - __ret_800; \ +#define vqrdmlah_laneq_s32(__p0_773, __p1_773, __p2_773, __p3_773) __extension__ ({ \ + int32x2_t __ret_773; \ + int32x2_t __s0_773 = __p0_773; \ + int32x2_t __s1_773 = __p1_773; \ + int32x4_t __s2_773 = __p2_773; \ + int32x2_t __rev0_773; __rev0_773 = __builtin_shufflevector(__s0_773, __s0_773, 1, 0); \ + int32x2_t __rev1_773; __rev1_773 = __builtin_shufflevector(__s1_773, __s1_773, 1, 0); \ + int32x4_t __rev2_773; __rev2_773 = __builtin_shufflevector(__s2_773, __s2_773, 3, 2, 1, 0); \ + __ret_773 = __noswap_vqrdmlah_s32(__rev0_773, __rev1_773, __noswap_splat_laneq_s32(__rev2_773, __p3_773)); \ + __ret_773 = __builtin_shufflevector(__ret_773, __ret_773, 1, 0); \ + __ret_773; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ - int16x4_t __ret_801; \ - int16x4_t __s0_801 = __p0_801; \ - int16x4_t __s1_801 = __p1_801; \ - int16x8_t __s2_801 = __p2_801; \ - __ret_801 = vqrdmlah_s16(__s0_801, __s1_801, splat_laneq_s16(__s2_801, __p3_801)); \ - __ret_801; \ +#define vqrdmlah_laneq_s16(__p0_774, __p1_774, __p2_774, __p3_774) __extension__ ({ \ + int16x4_t __ret_774; \ + int16x4_t __s0_774 = __p0_774; \ + int16x4_t __s1_774 = __p1_774; \ + int16x8_t __s2_774 = __p2_774; \ + __ret_774 = vqrdmlah_s16(__s0_774, __s1_774, splat_laneq_s16(__s2_774, __p3_774)); \ + __ret_774; \ }) #else -#define vqrdmlah_laneq_s16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \ - int16x4_t __ret_802; \ - int16x4_t __s0_802 = __p0_802; \ - int16x4_t __s1_802 = __p1_802; \ - int16x8_t __s2_802 = __p2_802; \ - int16x4_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \ - int16x4_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, 
__s1_802, 3, 2, 1, 0); \ - int16x8_t __rev2_802; __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_802 = __noswap_vqrdmlah_s16(__rev0_802, __rev1_802, __noswap_splat_laneq_s16(__rev2_802, __p3_802)); \ - __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \ - __ret_802; \ +#define vqrdmlah_laneq_s16(__p0_775, __p1_775, __p2_775, __p3_775) __extension__ ({ \ + int16x4_t __ret_775; \ + int16x4_t __s0_775 = __p0_775; \ + int16x4_t __s1_775 = __p1_775; \ + int16x8_t __s2_775 = __p2_775; \ + int16x4_t __rev0_775; __rev0_775 = __builtin_shufflevector(__s0_775, __s0_775, 3, 2, 1, 0); \ + int16x4_t __rev1_775; __rev1_775 = __builtin_shufflevector(__s1_775, __s1_775, 3, 2, 1, 0); \ + int16x8_t __rev2_775; __rev2_775 = __builtin_shufflevector(__s2_775, __s2_775, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_775 = __noswap_vqrdmlah_s16(__rev0_775, __rev1_775, __noswap_splat_laneq_s16(__rev2_775, __p3_775)); \ + __ret_775 = __builtin_shufflevector(__ret_775, __ret_775, 3, 2, 1, 0); \ + __ret_775; \ }) #endif -__ai __attribute__((target("v8.1a"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); return __ret; } -__ai __attribute__((target("v8.1a"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_lane_s32(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ - int32_t __ret_803; \ - int32_t __s0_803 = __p0_803; \ - int32_t __s1_803 = __p1_803; \ - int32x2_t __s2_803 = __p2_803; \ - __ret_803 = vqrdmlshs_s32(__s0_803, __s1_803, vget_lane_s32(__s2_803, __p3_803)); \ - __ret_803; \ +#define vqrdmlshs_lane_s32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \ + int32_t __ret_776; \ + int32_t __s0_776 = __p0_776; \ + int32_t __s1_776 = __p1_776; \ + int32x2_t __s2_776 = __p2_776; \ + __ret_776 = vqrdmlshs_s32(__s0_776, __s1_776, vget_lane_s32(__s2_776, __p3_776)); \ + __ret_776; \ }) #else -#define vqrdmlshs_lane_s32(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ - int32_t __ret_804; \ - int32_t __s0_804 = __p0_804; \ - int32_t __s1_804 = __p1_804; \ - int32x2_t __s2_804 = __p2_804; \ - int32x2_t __rev2_804; __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 1, 0); \ - __ret_804 = vqrdmlshs_s32(__s0_804, __s1_804, __noswap_vget_lane_s32(__rev2_804, __p3_804)); \ - __ret_804; \ +#define vqrdmlshs_lane_s32(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \ + int32_t __ret_777; \ + int32_t __s0_777 = __p0_777; \ + int32_t __s1_777 = __p1_777; \ + int32x2_t __s2_777 = __p2_777; \ + int32x2_t __rev2_777; __rev2_777 = __builtin_shufflevector(__s2_777, __s2_777, 1, 0); \ + __ret_777 = vqrdmlshs_s32(__s0_777, __s1_777, __noswap_vget_lane_s32(__rev2_777, __p3_777)); \ + __ret_777; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_lane_s16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ - int16_t __ret_805; \ - int16_t __s0_805 = __p0_805; \ - int16_t __s1_805 = __p1_805; \ - int16x4_t __s2_805 = __p2_805; \ - __ret_805 = vqrdmlshh_s16(__s0_805, __s1_805, vget_lane_s16(__s2_805, __p3_805)); \ - __ret_805; \ +#define vqrdmlshh_lane_s16(__p0_778, 
__p1_778, __p2_778, __p3_778) __extension__ ({ \ + int16_t __ret_778; \ + int16_t __s0_778 = __p0_778; \ + int16_t __s1_778 = __p1_778; \ + int16x4_t __s2_778 = __p2_778; \ + __ret_778 = vqrdmlshh_s16(__s0_778, __s1_778, vget_lane_s16(__s2_778, __p3_778)); \ + __ret_778; \ }) #else -#define vqrdmlshh_lane_s16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ - int16_t __ret_806; \ - int16_t __s0_806 = __p0_806; \ - int16_t __s1_806 = __p1_806; \ - int16x4_t __s2_806 = __p2_806; \ - int16x4_t __rev2_806; __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 3, 2, 1, 0); \ - __ret_806 = vqrdmlshh_s16(__s0_806, __s1_806, __noswap_vget_lane_s16(__rev2_806, __p3_806)); \ - __ret_806; \ +#define vqrdmlshh_lane_s16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ + int16_t __ret_779; \ + int16_t __s0_779 = __p0_779; \ + int16_t __s1_779 = __p1_779; \ + int16x4_t __s2_779 = __p2_779; \ + int16x4_t __rev2_779; __rev2_779 = __builtin_shufflevector(__s2_779, __s2_779, 3, 2, 1, 0); \ + __ret_779 = vqrdmlshh_s16(__s0_779, __s1_779, __noswap_vget_lane_s16(__rev2_779, __p3_779)); \ + __ret_779; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_laneq_s32(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ - int32_t __ret_807; \ - int32_t __s0_807 = __p0_807; \ - int32_t __s1_807 = __p1_807; \ - int32x4_t __s2_807 = __p2_807; \ - __ret_807 = vqrdmlshs_s32(__s0_807, __s1_807, vgetq_lane_s32(__s2_807, __p3_807)); \ - __ret_807; \ +#define vqrdmlshs_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ + int32_t __ret_780; \ + int32_t __s0_780 = __p0_780; \ + int32_t __s1_780 = __p1_780; \ + int32x4_t __s2_780 = __p2_780; \ + __ret_780 = vqrdmlshs_s32(__s0_780, __s1_780, vgetq_lane_s32(__s2_780, __p3_780)); \ + __ret_780; \ }) #else -#define vqrdmlshs_laneq_s32(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ - int32_t __ret_808; \ - int32_t __s0_808 = __p0_808; \ - int32_t __s1_808 = __p1_808; \ - int32x4_t __s2_808 = __p2_808; \ - int32x4_t __rev2_808; __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 3, 2, 1, 0); \ - __ret_808 = vqrdmlshs_s32(__s0_808, __s1_808, __noswap_vgetq_lane_s32(__rev2_808, __p3_808)); \ - __ret_808; \ +#define vqrdmlshs_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ + int32_t __ret_781; \ + int32_t __s0_781 = __p0_781; \ + int32_t __s1_781 = __p1_781; \ + int32x4_t __s2_781 = __p2_781; \ + int32x4_t __rev2_781; __rev2_781 = __builtin_shufflevector(__s2_781, __s2_781, 3, 2, 1, 0); \ + __ret_781 = vqrdmlshs_s32(__s0_781, __s1_781, __noswap_vgetq_lane_s32(__rev2_781, __p3_781)); \ + __ret_781; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_laneq_s16(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ - int16_t __ret_809; \ - int16_t __s0_809 = __p0_809; \ - int16_t __s1_809 = __p1_809; \ - int16x8_t __s2_809 = __p2_809; \ - __ret_809 = vqrdmlshh_s16(__s0_809, __s1_809, vgetq_lane_s16(__s2_809, __p3_809)); \ - __ret_809; \ +#define vqrdmlshh_laneq_s16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ + int16_t __ret_782; \ + int16_t __s0_782 = __p0_782; \ + int16_t __s1_782 = __p1_782; \ + int16x8_t __s2_782 = __p2_782; \ + __ret_782 = vqrdmlshh_s16(__s0_782, __s1_782, vgetq_lane_s16(__s2_782, __p3_782)); \ + __ret_782; \ }) #else -#define vqrdmlshh_laneq_s16(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ - int16_t __ret_810; \ - int16_t __s0_810 = __p0_810; \ - int16_t __s1_810 = __p1_810; \ - int16x8_t __s2_810 = __p2_810; \ - int16x8_t __rev2_810; __rev2_810 
= __builtin_shufflevector(__s2_810, __s2_810, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_810 = vqrdmlshh_s16(__s0_810, __s1_810, __noswap_vgetq_lane_s16(__rev2_810, __p3_810)); \ - __ret_810; \ +#define vqrdmlshh_laneq_s16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ + int16_t __ret_783; \ + int16_t __s0_783 = __p0_783; \ + int16_t __s1_783 = __p1_783; \ + int16x8_t __s2_783 = __p2_783; \ + int16x8_t __rev2_783; __rev2_783 = __builtin_shufflevector(__s2_783, __s2_783, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_783 = vqrdmlshh_s16(__s0_783, __s1_783, __noswap_vgetq_lane_s16(__rev2_783, __p3_783)); \ + __ret_783; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s32(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ - int32x4_t __ret_811; \ - int32x4_t __s0_811 = __p0_811; \ - int32x4_t __s1_811 = __p1_811; \ - int32x4_t __s2_811 = __p2_811; \ - __ret_811 = vqrdmlshq_s32(__s0_811, __s1_811, splatq_laneq_s32(__s2_811, __p3_811)); \ - __ret_811; \ +#define vqrdmlshq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ + int32x4_t __ret_784; \ + int32x4_t __s0_784 = __p0_784; \ + int32x4_t __s1_784 = __p1_784; \ + int32x4_t __s2_784 = __p2_784; \ + __ret_784 = vqrdmlshq_s32(__s0_784, __s1_784, splatq_laneq_s32(__s2_784, __p3_784)); \ + __ret_784; \ }) #else -#define vqrdmlshq_laneq_s32(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ - int32x4_t __ret_812; \ - int32x4_t __s0_812 = __p0_812; \ - int32x4_t __s1_812 = __p1_812; \ - int32x4_t __s2_812 = __p2_812; \ - int32x4_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \ - int32x4_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 3, 2, 1, 0); \ - int32x4_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 3, 2, 1, 0); \ - __ret_812 = __noswap_vqrdmlshq_s32(__rev0_812, __rev1_812, __noswap_splatq_laneq_s32(__rev2_812, __p3_812)); \ - __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 3, 2, 1, 0); \ - __ret_812; \ +#define vqrdmlshq_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \ + int32x4_t __ret_785; \ + int32x4_t __s0_785 = __p0_785; \ + int32x4_t __s1_785 = __p1_785; \ + int32x4_t __s2_785 = __p2_785; \ + int32x4_t __rev0_785; __rev0_785 = __builtin_shufflevector(__s0_785, __s0_785, 3, 2, 1, 0); \ + int32x4_t __rev1_785; __rev1_785 = __builtin_shufflevector(__s1_785, __s1_785, 3, 2, 1, 0); \ + int32x4_t __rev2_785; __rev2_785 = __builtin_shufflevector(__s2_785, __s2_785, 3, 2, 1, 0); \ + __ret_785 = __noswap_vqrdmlshq_s32(__rev0_785, __rev1_785, __noswap_splatq_laneq_s32(__rev2_785, __p3_785)); \ + __ret_785 = __builtin_shufflevector(__ret_785, __ret_785, 3, 2, 1, 0); \ + __ret_785; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s16(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ - int16x8_t __ret_813; \ - int16x8_t __s0_813 = __p0_813; \ - int16x8_t __s1_813 = __p1_813; \ - int16x8_t __s2_813 = __p2_813; \ - __ret_813 = vqrdmlshq_s16(__s0_813, __s1_813, splatq_laneq_s16(__s2_813, __p3_813)); \ - __ret_813; \ +#define vqrdmlshq_laneq_s16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ + int16x8_t __ret_786; \ + int16x8_t __s0_786 = __p0_786; \ + int16x8_t __s1_786 = __p1_786; \ + int16x8_t __s2_786 = __p2_786; \ + __ret_786 = vqrdmlshq_s16(__s0_786, __s1_786, splatq_laneq_s16(__s2_786, __p3_786)); \ + __ret_786; \ }) #else -#define vqrdmlshq_laneq_s16(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ - int16x8_t __ret_814; \ - int16x8_t __s0_814 = 
__p0_814; \ - int16x8_t __s1_814 = __p1_814; \ - int16x8_t __s2_814 = __p2_814; \ - int16x8_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_814; __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_814; __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_814 = __noswap_vqrdmlshq_s16(__rev0_814, __rev1_814, __noswap_splatq_laneq_s16(__rev2_814, __p3_814)); \ - __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_814; \ +#define vqrdmlshq_laneq_s16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ + int16x8_t __ret_787; \ + int16x8_t __s0_787 = __p0_787; \ + int16x8_t __s1_787 = __p1_787; \ + int16x8_t __s2_787 = __p2_787; \ + int16x8_t __rev0_787; __rev0_787 = __builtin_shufflevector(__s0_787, __s0_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_787; __rev1_787 = __builtin_shufflevector(__s1_787, __s1_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_787; __rev2_787 = __builtin_shufflevector(__s2_787, __s2_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_787 = __noswap_vqrdmlshq_s16(__rev0_787, __rev1_787, __noswap_splatq_laneq_s16(__rev2_787, __p3_787)); \ + __ret_787 = __builtin_shufflevector(__ret_787, __ret_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_787; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ - int32x2_t __ret_815; \ - int32x2_t __s0_815 = __p0_815; \ - int32x2_t __s1_815 = __p1_815; \ - int32x4_t __s2_815 = __p2_815; \ - __ret_815 = vqrdmlsh_s32(__s0_815, __s1_815, splat_laneq_s32(__s2_815, __p3_815)); \ - __ret_815; \ +#define vqrdmlsh_laneq_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ + int32x2_t __ret_788; \ + int32x2_t __s0_788 = __p0_788; \ + int32x2_t __s1_788 = __p1_788; \ + int32x4_t __s2_788 = __p2_788; \ + __ret_788 = vqrdmlsh_s32(__s0_788, __s1_788, splat_laneq_s32(__s2_788, __p3_788)); \ + __ret_788; \ }) #else -#define vqrdmlsh_laneq_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ - int32x2_t __ret_816; \ - int32x2_t __s0_816 = __p0_816; \ - int32x2_t __s1_816 = __p1_816; \ - int32x4_t __s2_816 = __p2_816; \ - int32x2_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \ - int32x2_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \ - int32x4_t __rev2_816; __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 3, 2, 1, 0); \ - __ret_816 = __noswap_vqrdmlsh_s32(__rev0_816, __rev1_816, __noswap_splat_laneq_s32(__rev2_816, __p3_816)); \ - __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \ - __ret_816; \ +#define vqrdmlsh_laneq_s32(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ + int32x2_t __ret_789; \ + int32x2_t __s0_789 = __p0_789; \ + int32x2_t __s1_789 = __p1_789; \ + int32x4_t __s2_789 = __p2_789; \ + int32x2_t __rev0_789; __rev0_789 = __builtin_shufflevector(__s0_789, __s0_789, 1, 0); \ + int32x2_t __rev1_789; __rev1_789 = __builtin_shufflevector(__s1_789, __s1_789, 1, 0); \ + int32x4_t __rev2_789; __rev2_789 = __builtin_shufflevector(__s2_789, __s2_789, 3, 2, 1, 0); \ + __ret_789 = __noswap_vqrdmlsh_s32(__rev0_789, __rev1_789, __noswap_splat_laneq_s32(__rev2_789, __p3_789)); \ + __ret_789 = __builtin_shufflevector(__ret_789, __ret_789, 1, 0); \ + __ret_789; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s16(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ 
({ \ - int16x4_t __ret_817; \ - int16x4_t __s0_817 = __p0_817; \ - int16x4_t __s1_817 = __p1_817; \ - int16x8_t __s2_817 = __p2_817; \ - __ret_817 = vqrdmlsh_s16(__s0_817, __s1_817, splat_laneq_s16(__s2_817, __p3_817)); \ - __ret_817; \ +#define vqrdmlsh_laneq_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ + int16x4_t __ret_790; \ + int16x4_t __s0_790 = __p0_790; \ + int16x4_t __s1_790 = __p1_790; \ + int16x8_t __s2_790 = __p2_790; \ + __ret_790 = vqrdmlsh_s16(__s0_790, __s1_790, splat_laneq_s16(__s2_790, __p3_790)); \ + __ret_790; \ }) #else -#define vqrdmlsh_laneq_s16(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ - int16x4_t __ret_818; \ - int16x4_t __s0_818 = __p0_818; \ - int16x4_t __s1_818 = __p1_818; \ - int16x8_t __s2_818 = __p2_818; \ - int16x4_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 3, 2, 1, 0); \ - int16x4_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 3, 2, 1, 0); \ - int16x8_t __rev2_818; __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_818 = __noswap_vqrdmlsh_s16(__rev0_818, __rev1_818, __noswap_splat_laneq_s16(__rev2_818, __p3_818)); \ - __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \ - __ret_818; \ +#define vqrdmlsh_laneq_s16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ + int16x4_t __ret_791; \ + int16x4_t __s0_791 = __p0_791; \ + int16x4_t __s1_791 = __p1_791; \ + int16x8_t __s2_791 = __p2_791; \ + int16x4_t __rev0_791; __rev0_791 = __builtin_shufflevector(__s0_791, __s0_791, 3, 2, 1, 0); \ + int16x4_t __rev1_791; __rev1_791 = __builtin_shufflevector(__s1_791, __s1_791, 3, 2, 1, 0); \ + int16x8_t __rev2_791; __rev2_791 = __builtin_shufflevector(__s2_791, __s2_791, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_791 = __noswap_vqrdmlsh_s16(__rev0_791, __rev1_791, __noswap_splat_laneq_s16(__rev2_791, __p3_791)); \ + __ret_791 = __builtin_shufflevector(__ret_791, __ret_791, 3, 2, 1, 0); \ + __ret_791; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65754,13 +65584,13 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65771,13 +65601,13 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t _ #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65786,116 +65616,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, f __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_lane_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ - float64x1_t __ret_819; \ - float64x1_t __s0_819 = __p0_819; \ - float64x1_t __s1_819 = __p1_819; \ - float64x1_t __s2_819 = __p2_819; \ -float64x1_t __reint_819 = __s2_819; \ -uint64x2_t __reint1_819 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ - __ret_819 = vcmla_f64(__s0_819, __s1_819, *(float64x1_t *) &__reint1_819); \ - __ret_819; \ +#define vcmla_lane_f64(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ + float64x1_t __ret_792; \ + float64x1_t __s0_792 = __p0_792; \ + float64x1_t __s1_792 = __p1_792; \ + float64x1_t __s2_792 = __p2_792; \ +float64x1_t __reint_792 = __s2_792; \ +uint64x2_t __reint1_792 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_792, __p3_792), vgetq_lane_u64(*(uint64x2_t *) &__reint_792, __p3_792)}; \ + __ret_792 = vcmla_f64(__s0_792, __s1_792, *(float64x1_t *) &__reint1_792); \ + __ret_792; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f64(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ - float64x2_t __ret_820; \ - float64x2_t __s0_820 = __p0_820; \ - float64x2_t __s1_820 = __p1_820; \ - float64x1_t __s2_820 = __p2_820; \ -float64x1_t __reint_820 = __s2_820; \ -uint64x2_t __reint1_820 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820), vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820)}; \ - __ret_820 = vcmlaq_f64(__s0_820, __s1_820, *(float64x2_t *) &__reint1_820); \ - 
__ret_820; \ +#define vcmlaq_lane_f64(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ + float64x2_t __ret_793; \ + float64x2_t __s0_793 = __p0_793; \ + float64x2_t __s1_793 = __p1_793; \ + float64x1_t __s2_793 = __p2_793; \ +float64x1_t __reint_793 = __s2_793; \ +uint64x2_t __reint1_793 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_793, __p3_793), vgetq_lane_u64(*(uint64x2_t *) &__reint_793, __p3_793)}; \ + __ret_793 = vcmlaq_f64(__s0_793, __s1_793, *(float64x2_t *) &__reint1_793); \ + __ret_793; \ }) #else -#define vcmlaq_lane_f64(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ - float64x2_t __ret_821; \ - float64x2_t __s0_821 = __p0_821; \ - float64x2_t __s1_821 = __p1_821; \ - float64x1_t __s2_821 = __p2_821; \ - float64x2_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 1, 0); \ - float64x2_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 1, 0); \ -float64x1_t __reint_821 = __s2_821; \ -uint64x2_t __reint1_821 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821)}; \ - __ret_821 = __noswap_vcmlaq_f64(__rev0_821, __rev1_821, *(float64x2_t *) &__reint1_821); \ - __ret_821 = __builtin_shufflevector(__ret_821, __ret_821, 1, 0); \ - __ret_821; \ +#define vcmlaq_lane_f64(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ + float64x2_t __ret_794; \ + float64x2_t __s0_794 = __p0_794; \ + float64x2_t __s1_794 = __p1_794; \ + float64x1_t __s2_794 = __p2_794; \ + float64x2_t __rev0_794; __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \ + float64x2_t __rev1_794; __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \ +float64x1_t __reint_794 = __s2_794; \ +uint64x2_t __reint1_794 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_794, __p3_794), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_794, __p3_794)}; \ + __ret_794 = __noswap_vcmlaq_f64(__rev0_794, __rev1_794, *(float64x2_t *) &__reint1_794); \ + __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 1, 0); \ + __ret_794; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f64(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ - float64x1_t __ret_822; \ - float64x1_t __s0_822 = __p0_822; \ - float64x1_t __s1_822 = __p1_822; \ - float64x2_t __s2_822 = __p2_822; \ -float64x2_t __reint_822 = __s2_822; \ -uint64x2_t __reint1_822 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822), vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822)}; \ - __ret_822 = vcmla_f64(__s0_822, __s1_822, *(float64x1_t *) &__reint1_822); \ - __ret_822; \ +#define vcmla_laneq_f64(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ + float64x1_t __ret_795; \ + float64x1_t __s0_795 = __p0_795; \ + float64x1_t __s1_795 = __p1_795; \ + float64x2_t __s2_795 = __p2_795; \ +float64x2_t __reint_795 = __s2_795; \ +uint64x2_t __reint1_795 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_795, __p3_795), vgetq_lane_u64(*(uint64x2_t *) &__reint_795, __p3_795)}; \ + __ret_795 = vcmla_f64(__s0_795, __s1_795, *(float64x1_t *) &__reint1_795); \ + __ret_795; \ }) #else -#define vcmla_laneq_f64(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ - float64x1_t __ret_823; \ - float64x1_t __s0_823 = __p0_823; \ - float64x1_t __s1_823 = __p1_823; \ - float64x2_t __s2_823 = __p2_823; \ - float64x2_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 1, 0); \ -float64x2_t __reint_823 = __rev2_823; \ 
-uint64x2_t __reint1_823 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823)}; \ - __ret_823 = vcmla_f64(__s0_823, __s1_823, *(float64x1_t *) &__reint1_823); \ - __ret_823; \ +#define vcmla_laneq_f64(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ + float64x1_t __ret_796; \ + float64x1_t __s0_796 = __p0_796; \ + float64x1_t __s1_796 = __p1_796; \ + float64x2_t __s2_796 = __p2_796; \ + float64x2_t __rev2_796; __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 1, 0); \ +float64x2_t __reint_796 = __rev2_796; \ +uint64x2_t __reint1_796 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_796, __p3_796), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_796, __p3_796)}; \ + __ret_796 = vcmla_f64(__s0_796, __s1_796, *(float64x1_t *) &__reint1_796); \ + __ret_796; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f64(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ - float64x2_t __ret_824; \ - float64x2_t __s0_824 = __p0_824; \ - float64x2_t __s1_824 = __p1_824; \ - float64x2_t __s2_824 = __p2_824; \ -float64x2_t __reint_824 = __s2_824; \ -uint64x2_t __reint1_824 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824), vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824)}; \ - __ret_824 = vcmlaq_f64(__s0_824, __s1_824, *(float64x2_t *) &__reint1_824); \ - __ret_824; \ +#define vcmlaq_laneq_f64(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ + float64x2_t __ret_797; \ + float64x2_t __s0_797 = __p0_797; \ + float64x2_t __s1_797 = __p1_797; \ + float64x2_t __s2_797 = __p2_797; \ +float64x2_t __reint_797 = __s2_797; \ +uint64x2_t __reint1_797 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_797, __p3_797), vgetq_lane_u64(*(uint64x2_t *) &__reint_797, __p3_797)}; \ + __ret_797 = vcmlaq_f64(__s0_797, __s1_797, *(float64x2_t *) &__reint1_797); \ + __ret_797; \ }) #else -#define vcmlaq_laneq_f64(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ - float64x2_t __ret_825; \ - float64x2_t __s0_825 = __p0_825; \ - float64x2_t __s1_825 = __p1_825; \ - float64x2_t __s2_825 = __p2_825; \ - float64x2_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 1, 0); \ - float64x2_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 1, 0); \ - float64x2_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 1, 0); \ -float64x2_t __reint_825 = __rev2_825; \ -uint64x2_t __reint1_825 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825)}; \ - __ret_825 = __noswap_vcmlaq_f64(__rev0_825, __rev1_825, *(float64x2_t *) &__reint1_825); \ - __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 1, 0); \ - __ret_825; \ +#define vcmlaq_laneq_f64(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ + float64x2_t __ret_798; \ + float64x2_t __s0_798 = __p0_798; \ + float64x2_t __s1_798 = __p1_798; \ + float64x2_t __s2_798 = __p2_798; \ + float64x2_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \ + float64x2_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \ + float64x2_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 1, 0); \ +float64x2_t __reint_798 = __rev2_798; \ +uint64x2_t __reint1_798 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_798, __p3_798), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_798, 
__p3_798)}; \ + __ret_798 = __noswap_vcmlaq_f64(__rev0_798, __rev1_798, *(float64x2_t *) &__reint1_798); \ + __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 1, 0); \ + __ret_798; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65904,116 +65734,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot180_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_rot180_lane_f64(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ - float64x1_t __ret_826; \ - float64x1_t __s0_826 = __p0_826; \ - float64x1_t __s1_826 = __p1_826; \ - float64x1_t __s2_826 = __p2_826; \ -float64x1_t __reint_826 = __s2_826; \ -uint64x2_t __reint1_826 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826), vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826)}; \ - __ret_826 = vcmla_rot180_f64(__s0_826, __s1_826, *(float64x1_t *) &__reint1_826); \ - __ret_826; \ +#define vcmla_rot180_lane_f64(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ + float64x1_t __ret_799; \ + float64x1_t __s0_799 = __p0_799; \ + float64x1_t __s1_799 = __p1_799; \ + float64x1_t __s2_799 = __p2_799; \ +float64x1_t __reint_799 = __s2_799; \ +uint64x2_t __reint1_799 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_799, __p3_799), vgetq_lane_u64(*(uint64x2_t *) &__reint_799, __p3_799)}; \ + __ret_799 = vcmla_rot180_f64(__s0_799, __s1_799, *(float64x1_t *) &__reint1_799); \ + __ret_799; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f64(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ - float64x2_t __ret_827; \ - float64x2_t __s0_827 = __p0_827; \ - float64x2_t __s1_827 = __p1_827; \ - float64x1_t __s2_827 = __p2_827; \ -float64x1_t __reint_827 = __s2_827; \ -uint64x2_t __reint1_827 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827), vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827)}; \ - 
__ret_827 = vcmlaq_rot180_f64(__s0_827, __s1_827, *(float64x2_t *) &__reint1_827); \ - __ret_827; \ +#define vcmlaq_rot180_lane_f64(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ + float64x2_t __ret_800; \ + float64x2_t __s0_800 = __p0_800; \ + float64x2_t __s1_800 = __p1_800; \ + float64x1_t __s2_800 = __p2_800; \ +float64x1_t __reint_800 = __s2_800; \ +uint64x2_t __reint1_800 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_800, __p3_800), vgetq_lane_u64(*(uint64x2_t *) &__reint_800, __p3_800)}; \ + __ret_800 = vcmlaq_rot180_f64(__s0_800, __s1_800, *(float64x2_t *) &__reint1_800); \ + __ret_800; \ }) #else -#define vcmlaq_rot180_lane_f64(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ - float64x2_t __ret_828; \ - float64x2_t __s0_828 = __p0_828; \ - float64x2_t __s1_828 = __p1_828; \ - float64x1_t __s2_828 = __p2_828; \ - float64x2_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 1, 0); \ - float64x2_t __rev1_828; __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 1, 0); \ -float64x1_t __reint_828 = __s2_828; \ -uint64x2_t __reint1_828 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828)}; \ - __ret_828 = __noswap_vcmlaq_rot180_f64(__rev0_828, __rev1_828, *(float64x2_t *) &__reint1_828); \ - __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \ - __ret_828; \ +#define vcmlaq_rot180_lane_f64(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ + float64x2_t __ret_801; \ + float64x2_t __s0_801 = __p0_801; \ + float64x2_t __s1_801 = __p1_801; \ + float64x1_t __s2_801 = __p2_801; \ + float64x2_t __rev0_801; __rev0_801 = __builtin_shufflevector(__s0_801, __s0_801, 1, 0); \ + float64x2_t __rev1_801; __rev1_801 = __builtin_shufflevector(__s1_801, __s1_801, 1, 0); \ +float64x1_t __reint_801 = __s2_801; \ +uint64x2_t __reint1_801 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_801, __p3_801), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_801, __p3_801)}; \ + __ret_801 = __noswap_vcmlaq_rot180_f64(__rev0_801, __rev1_801, *(float64x2_t *) &__reint1_801); \ + __ret_801 = __builtin_shufflevector(__ret_801, __ret_801, 1, 0); \ + __ret_801; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f64(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ - float64x1_t __ret_829; \ - float64x1_t __s0_829 = __p0_829; \ - float64x1_t __s1_829 = __p1_829; \ - float64x2_t __s2_829 = __p2_829; \ -float64x2_t __reint_829 = __s2_829; \ -uint64x2_t __reint1_829 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829), vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829)}; \ - __ret_829 = vcmla_rot180_f64(__s0_829, __s1_829, *(float64x1_t *) &__reint1_829); \ - __ret_829; \ +#define vcmla_rot180_laneq_f64(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \ + float64x1_t __ret_802; \ + float64x1_t __s0_802 = __p0_802; \ + float64x1_t __s1_802 = __p1_802; \ + float64x2_t __s2_802 = __p2_802; \ +float64x2_t __reint_802 = __s2_802; \ +uint64x2_t __reint1_802 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_802, __p3_802), vgetq_lane_u64(*(uint64x2_t *) &__reint_802, __p3_802)}; \ + __ret_802 = vcmla_rot180_f64(__s0_802, __s1_802, *(float64x1_t *) &__reint1_802); \ + __ret_802; \ }) #else -#define vcmla_rot180_laneq_f64(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ - float64x1_t __ret_830; \ - float64x1_t __s0_830 = __p0_830; \ - float64x1_t __s1_830 = __p1_830; \ - float64x2_t 
__s2_830 = __p2_830; \ - float64x2_t __rev2_830; __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 1, 0); \ -float64x2_t __reint_830 = __rev2_830; \ -uint64x2_t __reint1_830 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830)}; \ - __ret_830 = vcmla_rot180_f64(__s0_830, __s1_830, *(float64x1_t *) &__reint1_830); \ - __ret_830; \ +#define vcmla_rot180_laneq_f64(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ + float64x1_t __ret_803; \ + float64x1_t __s0_803 = __p0_803; \ + float64x1_t __s1_803 = __p1_803; \ + float64x2_t __s2_803 = __p2_803; \ + float64x2_t __rev2_803; __rev2_803 = __builtin_shufflevector(__s2_803, __s2_803, 1, 0); \ +float64x2_t __reint_803 = __rev2_803; \ +uint64x2_t __reint1_803 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_803, __p3_803), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_803, __p3_803)}; \ + __ret_803 = vcmla_rot180_f64(__s0_803, __s1_803, *(float64x1_t *) &__reint1_803); \ + __ret_803; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f64(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ - float64x2_t __ret_831; \ - float64x2_t __s0_831 = __p0_831; \ - float64x2_t __s1_831 = __p1_831; \ - float64x2_t __s2_831 = __p2_831; \ -float64x2_t __reint_831 = __s2_831; \ -uint64x2_t __reint1_831 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831), vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831)}; \ - __ret_831 = vcmlaq_rot180_f64(__s0_831, __s1_831, *(float64x2_t *) &__reint1_831); \ - __ret_831; \ +#define vcmlaq_rot180_laneq_f64(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ + float64x2_t __ret_804; \ + float64x2_t __s0_804 = __p0_804; \ + float64x2_t __s1_804 = __p1_804; \ + float64x2_t __s2_804 = __p2_804; \ +float64x2_t __reint_804 = __s2_804; \ +uint64x2_t __reint1_804 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_804, __p3_804), vgetq_lane_u64(*(uint64x2_t *) &__reint_804, __p3_804)}; \ + __ret_804 = vcmlaq_rot180_f64(__s0_804, __s1_804, *(float64x2_t *) &__reint1_804); \ + __ret_804; \ }) #else -#define vcmlaq_rot180_laneq_f64(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ - float64x2_t __ret_832; \ - float64x2_t __s0_832 = __p0_832; \ - float64x2_t __s1_832 = __p1_832; \ - float64x2_t __s2_832 = __p2_832; \ - float64x2_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 1, 0); \ - float64x2_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 1, 0); \ - float64x2_t __rev2_832; __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 1, 0); \ -float64x2_t __reint_832 = __rev2_832; \ -uint64x2_t __reint1_832 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832)}; \ - __ret_832 = __noswap_vcmlaq_rot180_f64(__rev0_832, __rev1_832, *(float64x2_t *) &__reint1_832); \ - __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 1, 0); \ - __ret_832; \ +#define vcmlaq_rot180_laneq_f64(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ + float64x2_t __ret_805; \ + float64x2_t __s0_805 = __p0_805; \ + float64x2_t __s1_805 = __p1_805; \ + float64x2_t __s2_805 = __p2_805; \ + float64x2_t __rev0_805; __rev0_805 = __builtin_shufflevector(__s0_805, __s0_805, 1, 0); \ + float64x2_t __rev1_805; __rev1_805 = __builtin_shufflevector(__s1_805, __s1_805, 1, 0); \ + float64x2_t __rev2_805; __rev2_805 = 
__builtin_shufflevector(__s2_805, __s2_805, 1, 0); \ +float64x2_t __reint_805 = __rev2_805; \ +uint64x2_t __reint1_805 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_805, __p3_805), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_805, __p3_805)}; \ + __ret_805 = __noswap_vcmlaq_rot180_f64(__rev0_805, __rev1_805, *(float64x2_t *) &__reint1_805); \ + __ret_805 = __builtin_shufflevector(__ret_805, __ret_805, 1, 0); \ + __ret_805; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66022,116 +65852,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot270_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_rot270_lane_f64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ - float64x1_t __ret_833; \ - float64x1_t __s0_833 = __p0_833; \ - float64x1_t __s1_833 = __p1_833; \ - float64x1_t __s2_833 = __p2_833; \ -float64x1_t __reint_833 = __s2_833; \ -uint64x2_t __reint1_833 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833), vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833)}; \ - __ret_833 = vcmla_rot270_f64(__s0_833, __s1_833, *(float64x1_t *) &__reint1_833); \ - __ret_833; \ +#define vcmla_rot270_lane_f64(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ + float64x1_t __ret_806; \ + float64x1_t __s0_806 = __p0_806; \ + float64x1_t __s1_806 = __p1_806; \ + float64x1_t __s2_806 = __p2_806; \ +float64x1_t __reint_806 = __s2_806; \ +uint64x2_t __reint1_806 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_806, __p3_806), vgetq_lane_u64(*(uint64x2_t *) &__reint_806, __p3_806)}; \ + __ret_806 = vcmla_rot270_f64(__s0_806, __s1_806, *(float64x1_t *) &__reint1_806); \ + __ret_806; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ - float64x2_t __ret_834; \ - float64x2_t __s0_834 = __p0_834; \ - float64x2_t 
__s1_834 = __p1_834; \ - float64x1_t __s2_834 = __p2_834; \ -float64x1_t __reint_834 = __s2_834; \ -uint64x2_t __reint1_834 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834), vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834)}; \ - __ret_834 = vcmlaq_rot270_f64(__s0_834, __s1_834, *(float64x2_t *) &__reint1_834); \ - __ret_834; \ +#define vcmlaq_rot270_lane_f64(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ + float64x2_t __ret_807; \ + float64x2_t __s0_807 = __p0_807; \ + float64x2_t __s1_807 = __p1_807; \ + float64x1_t __s2_807 = __p2_807; \ +float64x1_t __reint_807 = __s2_807; \ +uint64x2_t __reint1_807 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_807, __p3_807), vgetq_lane_u64(*(uint64x2_t *) &__reint_807, __p3_807)}; \ + __ret_807 = vcmlaq_rot270_f64(__s0_807, __s1_807, *(float64x2_t *) &__reint1_807); \ + __ret_807; \ }) #else -#define vcmlaq_rot270_lane_f64(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \ - float64x2_t __ret_835; \ - float64x2_t __s0_835 = __p0_835; \ - float64x2_t __s1_835 = __p1_835; \ - float64x1_t __s2_835 = __p2_835; \ - float64x2_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 1, 0); \ - float64x2_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 1, 0); \ -float64x1_t __reint_835 = __s2_835; \ -uint64x2_t __reint1_835 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835)}; \ - __ret_835 = __noswap_vcmlaq_rot270_f64(__rev0_835, __rev1_835, *(float64x2_t *) &__reint1_835); \ - __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 1, 0); \ - __ret_835; \ +#define vcmlaq_rot270_lane_f64(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ + float64x2_t __ret_808; \ + float64x2_t __s0_808 = __p0_808; \ + float64x2_t __s1_808 = __p1_808; \ + float64x1_t __s2_808 = __p2_808; \ + float64x2_t __rev0_808; __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 1, 0); \ + float64x2_t __rev1_808; __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 1, 0); \ +float64x1_t __reint_808 = __s2_808; \ +uint64x2_t __reint1_808 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_808, __p3_808), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_808, __p3_808)}; \ + __ret_808 = __noswap_vcmlaq_rot270_f64(__rev0_808, __rev1_808, *(float64x2_t *) &__reint1_808); \ + __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 1, 0); \ + __ret_808; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \ - float64x1_t __ret_836; \ - float64x1_t __s0_836 = __p0_836; \ - float64x1_t __s1_836 = __p1_836; \ - float64x2_t __s2_836 = __p2_836; \ -float64x2_t __reint_836 = __s2_836; \ -uint64x2_t __reint1_836 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836), vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836)}; \ - __ret_836 = vcmla_rot270_f64(__s0_836, __s1_836, *(float64x1_t *) &__reint1_836); \ - __ret_836; \ +#define vcmla_rot270_laneq_f64(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ + float64x1_t __ret_809; \ + float64x1_t __s0_809 = __p0_809; \ + float64x1_t __s1_809 = __p1_809; \ + float64x2_t __s2_809 = __p2_809; \ +float64x2_t __reint_809 = __s2_809; \ +uint64x2_t __reint1_809 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_809, __p3_809), vgetq_lane_u64(*(uint64x2_t *) &__reint_809, __p3_809)}; \ + __ret_809 = vcmla_rot270_f64(__s0_809, __s1_809, 
*(float64x1_t *) &__reint1_809); \ + __ret_809; \ }) #else -#define vcmla_rot270_laneq_f64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \ - float64x1_t __ret_837; \ - float64x1_t __s0_837 = __p0_837; \ - float64x1_t __s1_837 = __p1_837; \ - float64x2_t __s2_837 = __p2_837; \ - float64x2_t __rev2_837; __rev2_837 = __builtin_shufflevector(__s2_837, __s2_837, 1, 0); \ -float64x2_t __reint_837 = __rev2_837; \ -uint64x2_t __reint1_837 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837)}; \ - __ret_837 = vcmla_rot270_f64(__s0_837, __s1_837, *(float64x1_t *) &__reint1_837); \ - __ret_837; \ +#define vcmla_rot270_laneq_f64(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ + float64x1_t __ret_810; \ + float64x1_t __s0_810 = __p0_810; \ + float64x1_t __s1_810 = __p1_810; \ + float64x2_t __s2_810 = __p2_810; \ + float64x2_t __rev2_810; __rev2_810 = __builtin_shufflevector(__s2_810, __s2_810, 1, 0); \ +float64x2_t __reint_810 = __rev2_810; \ +uint64x2_t __reint1_810 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_810, __p3_810), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_810, __p3_810)}; \ + __ret_810 = vcmla_rot270_f64(__s0_810, __s1_810, *(float64x1_t *) &__reint1_810); \ + __ret_810; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \ - float64x2_t __ret_838; \ - float64x2_t __s0_838 = __p0_838; \ - float64x2_t __s1_838 = __p1_838; \ - float64x2_t __s2_838 = __p2_838; \ -float64x2_t __reint_838 = __s2_838; \ -uint64x2_t __reint1_838 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838), vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838)}; \ - __ret_838 = vcmlaq_rot270_f64(__s0_838, __s1_838, *(float64x2_t *) &__reint1_838); \ - __ret_838; \ +#define vcmlaq_rot270_laneq_f64(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ + float64x2_t __ret_811; \ + float64x2_t __s0_811 = __p0_811; \ + float64x2_t __s1_811 = __p1_811; \ + float64x2_t __s2_811 = __p2_811; \ +float64x2_t __reint_811 = __s2_811; \ +uint64x2_t __reint1_811 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_811, __p3_811), vgetq_lane_u64(*(uint64x2_t *) &__reint_811, __p3_811)}; \ + __ret_811 = vcmlaq_rot270_f64(__s0_811, __s1_811, *(float64x2_t *) &__reint1_811); \ + __ret_811; \ }) #else -#define vcmlaq_rot270_laneq_f64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ - float64x2_t __ret_839; \ - float64x2_t __s0_839 = __p0_839; \ - float64x2_t __s1_839 = __p1_839; \ - float64x2_t __s2_839 = __p2_839; \ - float64x2_t __rev0_839; __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 1, 0); \ - float64x2_t __rev1_839; __rev1_839 = __builtin_shufflevector(__s1_839, __s1_839, 1, 0); \ - float64x2_t __rev2_839; __rev2_839 = __builtin_shufflevector(__s2_839, __s2_839, 1, 0); \ -float64x2_t __reint_839 = __rev2_839; \ -uint64x2_t __reint1_839 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839)}; \ - __ret_839 = __noswap_vcmlaq_rot270_f64(__rev0_839, __rev1_839, *(float64x2_t *) &__reint1_839); \ - __ret_839 = __builtin_shufflevector(__ret_839, __ret_839, 1, 0); \ - __ret_839; \ +#define vcmlaq_rot270_laneq_f64(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ + float64x2_t __ret_812; \ + float64x2_t __s0_812 = __p0_812; \ + float64x2_t __s1_812 = __p1_812; \ + float64x2_t __s2_812 = 
__p2_812; \ + float64x2_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 1, 0); \ + float64x2_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 1, 0); \ + float64x2_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 1, 0); \ +float64x2_t __reint_812 = __rev2_812; \ +uint64x2_t __reint1_812 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_812, __p3_812), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_812, __p3_812)}; \ + __ret_812 = __noswap_vcmlaq_rot270_f64(__rev0_812, __rev1_812, *(float64x2_t *) &__reint1_812); \ + __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \ + __ret_812; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66140,116 +65970,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t _ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot90_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_rot90_lane_f64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ - float64x1_t __ret_840; \ - float64x1_t __s0_840 = __p0_840; \ - float64x1_t __s1_840 = __p1_840; \ - float64x1_t __s2_840 = __p2_840; \ -float64x1_t __reint_840 = __s2_840; \ -uint64x2_t __reint1_840 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840), vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840)}; \ - __ret_840 = vcmla_rot90_f64(__s0_840, __s1_840, *(float64x1_t *) &__reint1_840); \ - __ret_840; \ +#define vcmla_rot90_lane_f64(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ + float64x1_t __ret_813; \ + float64x1_t __s0_813 = __p0_813; \ + float64x1_t __s1_813 = __p1_813; \ + float64x1_t __s2_813 = __p2_813; \ +float64x1_t __reint_813 = __s2_813; \ +uint64x2_t __reint1_813 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_813, __p3_813), vgetq_lane_u64(*(uint64x2_t *) &__reint_813, __p3_813)}; \ + __ret_813 = vcmla_rot90_f64(__s0_813, __s1_813, *(float64x1_t *) 
&__reint1_813); \ + __ret_813; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ - float64x2_t __ret_841; \ - float64x2_t __s0_841 = __p0_841; \ - float64x2_t __s1_841 = __p1_841; \ - float64x1_t __s2_841 = __p2_841; \ -float64x1_t __reint_841 = __s2_841; \ -uint64x2_t __reint1_841 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841), vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841)}; \ - __ret_841 = vcmlaq_rot90_f64(__s0_841, __s1_841, *(float64x2_t *) &__reint1_841); \ - __ret_841; \ +#define vcmlaq_rot90_lane_f64(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ + float64x2_t __ret_814; \ + float64x2_t __s0_814 = __p0_814; \ + float64x2_t __s1_814 = __p1_814; \ + float64x1_t __s2_814 = __p2_814; \ +float64x1_t __reint_814 = __s2_814; \ +uint64x2_t __reint1_814 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_814, __p3_814), vgetq_lane_u64(*(uint64x2_t *) &__reint_814, __p3_814)}; \ + __ret_814 = vcmlaq_rot90_f64(__s0_814, __s1_814, *(float64x2_t *) &__reint1_814); \ + __ret_814; \ }) #else -#define vcmlaq_rot90_lane_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ - float64x2_t __ret_842; \ - float64x2_t __s0_842 = __p0_842; \ - float64x2_t __s1_842 = __p1_842; \ - float64x1_t __s2_842 = __p2_842; \ - float64x2_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \ - float64x2_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \ -float64x1_t __reint_842 = __s2_842; \ -uint64x2_t __reint1_842 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842)}; \ - __ret_842 = __noswap_vcmlaq_rot90_f64(__rev0_842, __rev1_842, *(float64x2_t *) &__reint1_842); \ - __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \ - __ret_842; \ +#define vcmlaq_rot90_lane_f64(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ + float64x2_t __ret_815; \ + float64x2_t __s0_815 = __p0_815; \ + float64x2_t __s1_815 = __p1_815; \ + float64x1_t __s2_815 = __p2_815; \ + float64x2_t __rev0_815; __rev0_815 = __builtin_shufflevector(__s0_815, __s0_815, 1, 0); \ + float64x2_t __rev1_815; __rev1_815 = __builtin_shufflevector(__s1_815, __s1_815, 1, 0); \ +float64x1_t __reint_815 = __s2_815; \ +uint64x2_t __reint1_815 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_815, __p3_815), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_815, __p3_815)}; \ + __ret_815 = __noswap_vcmlaq_rot90_f64(__rev0_815, __rev1_815, *(float64x2_t *) &__reint1_815); \ + __ret_815 = __builtin_shufflevector(__ret_815, __ret_815, 1, 0); \ + __ret_815; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ - float64x1_t __ret_843; \ - float64x1_t __s0_843 = __p0_843; \ - float64x1_t __s1_843 = __p1_843; \ - float64x2_t __s2_843 = __p2_843; \ -float64x2_t __reint_843 = __s2_843; \ -uint64x2_t __reint1_843 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843), vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843)}; \ - __ret_843 = vcmla_rot90_f64(__s0_843, __s1_843, *(float64x1_t *) &__reint1_843); \ - __ret_843; \ +#define vcmla_rot90_laneq_f64(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ + float64x1_t __ret_816; \ + float64x1_t __s0_816 = __p0_816; \ + float64x1_t __s1_816 = __p1_816; \ + float64x2_t __s2_816 = __p2_816; \ +float64x2_t __reint_816 = 
__s2_816; \ +uint64x2_t __reint1_816 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_816, __p3_816), vgetq_lane_u64(*(uint64x2_t *) &__reint_816, __p3_816)}; \ + __ret_816 = vcmla_rot90_f64(__s0_816, __s1_816, *(float64x1_t *) &__reint1_816); \ + __ret_816; \ }) #else -#define vcmla_rot90_laneq_f64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ - float64x1_t __ret_844; \ - float64x1_t __s0_844 = __p0_844; \ - float64x1_t __s1_844 = __p1_844; \ - float64x2_t __s2_844 = __p2_844; \ - float64x2_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \ -float64x2_t __reint_844 = __rev2_844; \ -uint64x2_t __reint1_844 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844)}; \ - __ret_844 = vcmla_rot90_f64(__s0_844, __s1_844, *(float64x1_t *) &__reint1_844); \ - __ret_844; \ +#define vcmla_rot90_laneq_f64(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \ + float64x1_t __ret_817; \ + float64x1_t __s0_817 = __p0_817; \ + float64x1_t __s1_817 = __p1_817; \ + float64x2_t __s2_817 = __p2_817; \ + float64x2_t __rev2_817; __rev2_817 = __builtin_shufflevector(__s2_817, __s2_817, 1, 0); \ +float64x2_t __reint_817 = __rev2_817; \ +uint64x2_t __reint1_817 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_817, __p3_817), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_817, __p3_817)}; \ + __ret_817 = vcmla_rot90_f64(__s0_817, __s1_817, *(float64x1_t *) &__reint1_817); \ + __ret_817; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ - float64x2_t __ret_845; \ - float64x2_t __s0_845 = __p0_845; \ - float64x2_t __s1_845 = __p1_845; \ - float64x2_t __s2_845 = __p2_845; \ -float64x2_t __reint_845 = __s2_845; \ -uint64x2_t __reint1_845 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845), vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845)}; \ - __ret_845 = vcmlaq_rot90_f64(__s0_845, __s1_845, *(float64x2_t *) &__reint1_845); \ - __ret_845; \ +#define vcmlaq_rot90_laneq_f64(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ + float64x2_t __ret_818; \ + float64x2_t __s0_818 = __p0_818; \ + float64x2_t __s1_818 = __p1_818; \ + float64x2_t __s2_818 = __p2_818; \ +float64x2_t __reint_818 = __s2_818; \ +uint64x2_t __reint1_818 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_818, __p3_818), vgetq_lane_u64(*(uint64x2_t *) &__reint_818, __p3_818)}; \ + __ret_818 = vcmlaq_rot90_f64(__s0_818, __s1_818, *(float64x2_t *) &__reint1_818); \ + __ret_818; \ }) #else -#define vcmlaq_rot90_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ - float64x2_t __ret_846; \ - float64x2_t __s0_846 = __p0_846; \ - float64x2_t __s1_846 = __p1_846; \ - float64x2_t __s2_846 = __p2_846; \ - float64x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ - float64x2_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \ - float64x2_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \ -float64x2_t __reint_846 = __rev2_846; \ -uint64x2_t __reint1_846 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846)}; \ - __ret_846 = __noswap_vcmlaq_rot90_f64(__rev0_846, __rev1_846, *(float64x2_t *) &__reint1_846); \ - __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ - __ret_846; \ +#define 
vcmlaq_rot90_laneq_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ + float64x2_t __ret_819; \ + float64x2_t __s0_819 = __p0_819; \ + float64x2_t __s1_819 = __p1_819; \ + float64x2_t __s2_819 = __p2_819; \ + float64x2_t __rev0_819; __rev0_819 = __builtin_shufflevector(__s0_819, __s0_819, 1, 0); \ + float64x2_t __rev1_819; __rev1_819 = __builtin_shufflevector(__s1_819, __s1_819, 1, 0); \ + float64x2_t __rev2_819; __rev2_819 = __builtin_shufflevector(__s2_819, __s2_819, 1, 0); \ +float64x2_t __reint_819 = __rev2_819; \ +uint64x2_t __reint1_819 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ + __ret_819 = __noswap_vcmlaq_rot90_f64(__rev0_819, __rev1_819, *(float64x2_t *) &__reint1_819); \ + __ret_819 = __builtin_shufflevector(__ret_819, __ret_819, 1, 0); \ + __ret_819; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__rev0, 41); @@ -66259,13 +66089,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__rev0, 9); @@ -66275,13 +66105,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__rev0, 42); @@ -66290,19 +66120,19 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd32x_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32x_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd32x_f64((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t 
vrnd32zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__rev0, 41); @@ -66312,13 +66142,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__rev0, 9); @@ -66328,13 +66158,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__rev0, 42); @@ -66343,19 +66173,19 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd32z_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32z_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd32z_f64((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__rev0, 41); @@ -66365,13 +66195,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__p0, 9); return __ret; } 
#else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__rev0, 9); @@ -66381,13 +66211,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__rev0, 42); @@ -66396,19 +66226,19 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd64x_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64x_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd64x_f64((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__rev0, 41); @@ -66418,13 +66248,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__rev0, 9); @@ -66434,13 +66264,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__rev0, 42); @@ -66449,215 +66279,545 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd64z_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64z_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd64z_f64((int8x8_t)__p0, 10); return __ret; } #endif -#if defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); +#define vbfdotq_lane_f32(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ + float32x4_t __ret_820; \ + float32x4_t __s0_820 = __p0_820; \ + bfloat16x8_t __s1_820 = __p1_820; \ + bfloat16x4_t __s2_820 = __p2_820; \ +bfloat16x4_t __reint_820 = __s2_820; \ +float32x4_t __reint1_820 = splatq_lane_f32(*(float32x2_t *) &__reint_820, __p3_820); \ + __ret_820 = vbfdotq_f32(__s0_820, __s1_820, *(bfloat16x8_t *) &__reint1_820); \ + __ret_820; \ +}) +#else +#define vbfdotq_lane_f32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ + float32x4_t __ret_821; \ + float32x4_t __s0_821 = __p0_821; \ + bfloat16x8_t __s1_821 = __p1_821; \ + bfloat16x4_t __s2_821 = __p2_821; \ + float32x4_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_821; __rev2_821 = __builtin_shufflevector(__s2_821, __s2_821, 3, 2, 1, 0); \ +bfloat16x4_t __reint_821 = __rev2_821; \ +float32x4_t __reint1_821 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_821, __p3_821); \ + __ret_821 = __noswap_vbfdotq_f32(__rev0_821, __rev1_821, *(bfloat16x8_t *) &__reint1_821); \ + __ret_821 = __builtin_shufflevector(__ret_821, __ret_821, 3, 2, 1, 0); \ + __ret_821; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdot_lane_f32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ + float32x2_t __ret_822; \ + float32x2_t __s0_822 = __p0_822; \ + bfloat16x4_t __s1_822 = __p1_822; \ + bfloat16x4_t __s2_822 = __p2_822; \ +bfloat16x4_t __reint_822 = __s2_822; \ +float32x2_t __reint1_822 = splat_lane_f32(*(float32x2_t *) &__reint_822, __p3_822); \ + __ret_822 = vbfdot_f32(__s0_822, __s1_822, *(bfloat16x4_t *) &__reint1_822); \ + __ret_822; \ +}) +#else +#define vbfdot_lane_f32(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ + float32x2_t __ret_823; \ + float32x2_t __s0_823 = __p0_823; \ + bfloat16x4_t __s1_823 = __p1_823; \ + bfloat16x4_t __s2_823 = __p2_823; \ + float32x2_t __rev0_823; __rev0_823 = __builtin_shufflevector(__s0_823, __s0_823, 1, 0); \ + bfloat16x4_t __rev1_823; __rev1_823 = __builtin_shufflevector(__s1_823, __s1_823, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 3, 2, 1, 0); \ +bfloat16x4_t __reint_823 = __rev2_823; \ +float32x2_t __reint1_823 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_823, __p3_823); \ + __ret_823 = __noswap_vbfdot_f32(__rev0_823, __rev1_823, *(bfloat16x4_t *) &__reint1_823); \ + __ret_823 = __builtin_shufflevector(__ret_823, __ret_823, 1, 0); \ + __ret_823; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdotq_laneq_f32(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ + float32x4_t __ret_824; \ + 
float32x4_t __s0_824 = __p0_824; \ + bfloat16x8_t __s1_824 = __p1_824; \ + bfloat16x8_t __s2_824 = __p2_824; \ +bfloat16x8_t __reint_824 = __s2_824; \ +float32x4_t __reint1_824 = splatq_laneq_f32(*(float32x4_t *) &__reint_824, __p3_824); \ + __ret_824 = vbfdotq_f32(__s0_824, __s1_824, *(bfloat16x8_t *) &__reint1_824); \ + __ret_824; \ +}) +#else +#define vbfdotq_laneq_f32(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ + float32x4_t __ret_825; \ + float32x4_t __s0_825 = __p0_825; \ + bfloat16x8_t __s1_825 = __p1_825; \ + bfloat16x8_t __s2_825 = __p2_825; \ + float32x4_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_825 = __rev2_825; \ +float32x4_t __reint1_825 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_825, __p3_825); \ + __ret_825 = __noswap_vbfdotq_f32(__rev0_825, __rev1_825, *(bfloat16x8_t *) &__reint1_825); \ + __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 3, 2, 1, 0); \ + __ret_825; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdot_laneq_f32(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ + float32x2_t __ret_826; \ + float32x2_t __s0_826 = __p0_826; \ + bfloat16x4_t __s1_826 = __p1_826; \ + bfloat16x8_t __s2_826 = __p2_826; \ +bfloat16x8_t __reint_826 = __s2_826; \ +float32x2_t __reint1_826 = splat_laneq_f32(*(float32x4_t *) &__reint_826, __p3_826); \ + __ret_826 = vbfdot_f32(__s0_826, __s1_826, *(bfloat16x4_t *) &__reint1_826); \ + __ret_826; \ +}) +#else +#define vbfdot_laneq_f32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ + float32x2_t __ret_827; \ + float32x2_t __s0_827 = __p0_827; \ + bfloat16x4_t __s1_827 = __p1_827; \ + bfloat16x8_t __s2_827 = __p2_827; \ + float32x2_t __rev0_827; __rev0_827 = __builtin_shufflevector(__s0_827, __s0_827, 1, 0); \ + bfloat16x4_t __rev1_827; __rev1_827 = __builtin_shufflevector(__s1_827, __s1_827, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_827; __rev2_827 = __builtin_shufflevector(__s2_827, __s2_827, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_827 = __rev2_827; \ +float32x2_t __reint1_827 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_827, __p3_827); \ + __ret_827 = __noswap_vbfdot_f32(__rev0_827, __rev1_827, *(bfloat16x4_t *) &__reint1_827); \ + __ret_827 = __builtin_shufflevector(__ret_827, __ret_827, 1, 0); \ + __ret_827; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_lane_f32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ + float32x4_t __ret_828; \ + float32x4_t __s0_828 = __p0_828; \ + bfloat16x8_t __s1_828 = __p1_828; \ + bfloat16x4_t __s2_828 = __p2_828; \ + __ret_828 = vbfmlalbq_f32(__s0_828, __s1_828, (bfloat16x8_t) {vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828)}); \ + __ret_828; \ +}) +#else +#define vbfmlalbq_lane_f32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ + float32x4_t __ret_829; \ + float32x4_t __s0_829 = __p0_829; \ + bfloat16x8_t __s1_829 = __p1_829; \ + bfloat16x4_t __s2_829 = __p2_829; \ + float32x4_t __rev0_829; __rev0_829 = __builtin_shufflevector(__s0_829, __s0_829, 3, 2, 1, 0); \ + bfloat16x8_t 
__rev1_829; __rev1_829 = __builtin_shufflevector(__s1_829, __s1_829, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_829; __rev2_829 = __builtin_shufflevector(__s2_829, __s2_829, 3, 2, 1, 0); \ + __ret_829 = __noswap_vbfmlalbq_f32(__rev0_829, __rev1_829, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829)}); \ + __ret_829 = __builtin_shufflevector(__ret_829, __ret_829, 3, 2, 1, 0); \ + __ret_829; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_laneq_f32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ + float32x4_t __ret_830; \ + float32x4_t __s0_830 = __p0_830; \ + bfloat16x8_t __s1_830 = __p1_830; \ + bfloat16x8_t __s2_830 = __p2_830; \ + __ret_830 = vbfmlalbq_f32(__s0_830, __s1_830, (bfloat16x8_t) {vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830)}); \ + __ret_830; \ +}) +#else +#define vbfmlalbq_laneq_f32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ + float32x4_t __ret_831; \ + float32x4_t __s0_831 = __p0_831; \ + bfloat16x8_t __s1_831 = __p1_831; \ + bfloat16x8_t __s2_831 = __p2_831; \ + float32x4_t __rev0_831; __rev0_831 = __builtin_shufflevector(__s0_831, __s0_831, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_831; __rev1_831 = __builtin_shufflevector(__s1_831, __s1_831, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_831; __rev2_831 = __builtin_shufflevector(__s2_831, __s2_831, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_831 = __noswap_vbfmlalbq_f32(__rev0_831, __rev1_831, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831)}); \ + __ret_831 = __builtin_shufflevector(__ret_831, __ret_831, 3, 2, 1, 0); \ + __ret_831; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_lane_f32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ + float32x4_t __ret_832; \ + float32x4_t __s0_832 = __p0_832; \ + bfloat16x8_t __s1_832 = __p1_832; \ + bfloat16x4_t __s2_832 = __p2_832; \ + __ret_832 = vbfmlaltq_f32(__s0_832, __s1_832, (bfloat16x8_t) {vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832)}); \ + __ret_832; \ +}) +#else +#define vbfmlaltq_lane_f32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ + float32x4_t __ret_833; \ + float32x4_t __s0_833 = __p0_833; \ + bfloat16x8_t __s1_833 = __p1_833; \ + bfloat16x4_t __s2_833 = __p2_833; \ + float32x4_t __rev0_833; __rev0_833 = __builtin_shufflevector(__s0_833, __s0_833, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_833; __rev1_833 = __builtin_shufflevector(__s1_833, 
__s1_833, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_833; __rev2_833 = __builtin_shufflevector(__s2_833, __s2_833, 3, 2, 1, 0); \ + __ret_833 = __noswap_vbfmlaltq_f32(__rev0_833, __rev1_833, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833)}); \ + __ret_833 = __builtin_shufflevector(__ret_833, __ret_833, 3, 2, 1, 0); \ + __ret_833; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_laneq_f32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ + float32x4_t __ret_834; \ + float32x4_t __s0_834 = __p0_834; \ + bfloat16x8_t __s1_834 = __p1_834; \ + bfloat16x8_t __s2_834 = __p2_834; \ + __ret_834 = vbfmlaltq_f32(__s0_834, __s1_834, (bfloat16x8_t) {vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834)}); \ + __ret_834; \ +}) +#else +#define vbfmlaltq_laneq_f32(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \ + float32x4_t __ret_835; \ + float32x4_t __s0_835 = __p0_835; \ + bfloat16x8_t __s1_835 = __p1_835; \ + bfloat16x8_t __s2_835 = __p2_835; \ + float32x4_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_835; __rev2_835 = __builtin_shufflevector(__s2_835, __s2_835, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_835 = __noswap_vbfmlaltq_f32(__rev0_835, __rev1_835, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835)}); \ + __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 3, 2, 1, 0); \ + __ret_835; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_836) { + float32x4_t __ret_836; +bfloat16x4_t __reint_836 = __p0_836; +int32x4_t __reint1_836 = vshll_n_s16(*(int16x4_t *) &__reint_836, 16); + __ret_836 = *(float32x4_t *) &__reint1_836; + return __ret_836; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_837) { + float32x4_t __ret_837; + bfloat16x4_t __rev0_837; __rev0_837 = __builtin_shufflevector(__p0_837, __p0_837, 3, 2, 1, 0); +bfloat16x4_t __reint_837 = __rev0_837; +int32x4_t __reint1_837 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_837, 16); + __ret_837 = *(float32x4_t *) &__reint1_837; + __ret_837 = __builtin_shufflevector(__ret_837, __ret_837, 3, 2, 1, 0); + return __ret_837; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_838) { + float32x4_t __ret_838; +bfloat16x4_t __reint_838 = __p0_838; +int32x4_t __reint1_838 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_838, 16); + __ret_838 = *(float32x4_t *) 
&__reint1_838; + return __ret_838; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); return __ret; } #else -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vrnd_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); return __ret; } #else -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vrnda_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vdotq_lane_u32(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ + uint32x4_t __ret_839; \ + uint32x4_t __s0_839 = __p0_839; \ + uint8x16_t __s1_839 = __p1_839; \ + uint8x8_t __s2_839 = __p2_839; \ +uint8x8_t __reint_839 = __s2_839; \ +uint32x4_t __reint1_839 = splatq_lane_u32(*(uint32x2_t *) &__reint_839, __p3_839); \ + __ret_839 = vdotq_u32(__s0_839, __s1_839, *(uint8x16_t *) &__reint1_839); \ + __ret_839; \ +}) #else -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vdotq_lane_u32(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ + uint32x4_t __ret_840; \ + uint32x4_t __s0_840 = __p0_840; \ + uint8x16_t __s1_840 = __p1_840; \ + uint8x8_t __s2_840 = __p2_840; \ + uint32x4_t __rev0_840; __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 3, 2, 1, 0); \ + uint8x16_t __rev1_840; __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
uint8x8_t __rev2_840; __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_840 = __rev2_840; \ +uint32x4_t __reint1_840 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_840, __p3_840); \ + __ret_840 = __noswap_vdotq_u32(__rev0_840, __rev1_840, *(uint8x16_t *) &__reint1_840); \ + __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \ + __ret_840; \ +}) #endif -__ai float64x1_t vrndi_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vdotq_lane_s32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ + int32x4_t __ret_841; \ + int32x4_t __s0_841 = __p0_841; \ + int8x16_t __s1_841 = __p1_841; \ + int8x8_t __s2_841 = __p2_841; \ +int8x8_t __reint_841 = __s2_841; \ +int32x4_t __reint1_841 = splatq_lane_s32(*(int32x2_t *) &__reint_841, __p3_841); \ + __ret_841 = vdotq_s32(__s0_841, __s1_841, *(int8x16_t *) &__reint1_841); \ + __ret_841; \ +}) #else -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vdotq_lane_s32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ + int32x4_t __ret_842; \ + int32x4_t __s0_842 = __p0_842; \ + int8x16_t __s1_842 = __p1_842; \ + int8x8_t __s2_842 = __p2_842; \ + int32x4_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 3, 2, 1, 0); \ + int8x16_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_842; __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_842 = __rev2_842; \ +int32x4_t __reint1_842 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_842, __p3_842); \ + __ret_842 = __noswap_vdotq_s32(__rev0_842, __rev1_842, *(int8x16_t *) &__reint1_842); \ + __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 3, 2, 1, 0); \ + __ret_842; \ +}) #endif -__ai float64x1_t vrndm_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vdot_lane_u32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ + uint32x2_t __ret_843; \ + uint32x2_t __s0_843 = __p0_843; \ + uint8x8_t __s1_843 = __p1_843; \ + uint8x8_t __s2_843 = __p2_843; \ +uint8x8_t __reint_843 = __s2_843; \ +uint32x2_t __reint1_843 = splat_lane_u32(*(uint32x2_t *) &__reint_843, __p3_843); \ + __ret_843 = vdot_u32(__s0_843, __s1_843, *(uint8x8_t *) &__reint1_843); \ + __ret_843; \ +}) #else -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vdot_lane_u32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ + uint32x2_t __ret_844; \ + 
uint32x2_t __s0_844 = __p0_844; \ + uint8x8_t __s1_844 = __p1_844; \ + uint8x8_t __s2_844 = __p2_844; \ + uint32x2_t __rev0_844; __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 1, 0); \ + uint8x8_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_844 = __rev2_844; \ +uint32x2_t __reint1_844 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_844, __p3_844); \ + __ret_844 = __noswap_vdot_u32(__rev0_844, __rev1_844, *(uint8x8_t *) &__reint1_844); \ + __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 1, 0); \ + __ret_844; \ +}) #endif -__ai float64x1_t vrndn_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vdot_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ + int32x2_t __ret_845; \ + int32x2_t __s0_845 = __p0_845; \ + int8x8_t __s1_845 = __p1_845; \ + int8x8_t __s2_845 = __p2_845; \ +int8x8_t __reint_845 = __s2_845; \ +int32x2_t __reint1_845 = splat_lane_s32(*(int32x2_t *) &__reint_845, __p3_845); \ + __ret_845 = vdot_s32(__s0_845, __s1_845, *(int8x8_t *) &__reint1_845); \ + __ret_845; \ +}) #else -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vdot_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ + int32x2_t __ret_846; \ + int32x2_t __s0_846 = __p0_846; \ + int8x8_t __s1_846 = __p1_846; \ + int8x8_t __s2_846 = __p2_846; \ + int32x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ + int8x8_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_846 = __rev2_846; \ +int32x2_t __reint1_846 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_846, __p3_846); \ + __ret_846 = __noswap_vdot_s32(__rev0_846, __rev1_846, *(int8x8_t *) &__reint1_846); \ + __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ + __ret_846; \ +}) #endif -__ai float64x1_t vrndp_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vmulq_lane_f16(__p0_847, __p1_847, __p2_847) __extension__ ({ \ + float16x8_t __ret_847; \ + float16x8_t __s0_847 = __p0_847; \ + float16x4_t __s1_847 = __p1_847; \ + __ret_847 = __s0_847 * splatq_lane_f16(__s1_847, __p2_847); \ + __ret_847; \ +}) #else -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulq_lane_f16(__p0_848, __p1_848, 
__p2_848) __extension__ ({ \ + float16x8_t __ret_848; \ + float16x8_t __s0_848 = __p0_848; \ + float16x4_t __s1_848 = __p1_848; \ + float16x8_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_848; __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \ + __ret_848 = __rev0_848 * __noswap_splatq_lane_f16(__rev1_848, __p2_848); \ + __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_848; \ +}) #endif -__ai float64x1_t vrndx_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); - return __ret; -} -#endif -#if defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} +#define vmul_lane_f16(__p0_849, __p1_849, __p2_849) __extension__ ({ \ + float16x4_t __ret_849; \ + float16x4_t __s0_849 = __p0_849; \ + float16x4_t __s1_849 = __p1_849; \ + __ret_849 = __s0_849 * splat_lane_f16(__s1_849, __p2_849); \ + __ret_849; \ +}) #else -__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmul_lane_f16(__p0_850, __p1_850, __p2_850) __extension__ ({ \ + float16x4_t __ret_850; \ + float16x4_t __s0_850 = __p0_850; \ + float16x4_t __s1_850 = __p1_850; \ + float16x4_t __rev0_850; __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 3, 2, 1, 0); \ + float16x4_t __rev1_850; __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 3, 2, 1, 0); \ + __ret_850 = __rev0_850 * __noswap_splat_lane_f16(__rev1_850, __p2_850); \ + __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 3, 2, 1, 0); \ + __ret_850; \ +}) #endif -__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} +#define vsudotq_lane_s32(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \ + int32x4_t __ret_851; \ + int32x4_t __s0_851 = __p0_851; \ + int8x16_t __s1_851 = __p1_851; \ + uint8x8_t __s2_851 = __p2_851; \ +uint8x8_t __reint_851 = __s2_851; \ + __ret_851 = vusdotq_s32(__s0_851, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_851, __p3_851)), __s1_851); \ + __ret_851; \ +}) #else -__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vsudotq_lane_s32(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \ + int32x4_t __ret_852; \ + int32x4_t __s0_852 = __p0_852; \ + 
int8x16_t __s1_852 = __p1_852; \ + uint8x8_t __s2_852 = __p2_852; \ + int32x4_t __rev0_852; __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \ + int8x16_t __rev1_852; __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_852; __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_852 = __rev2_852; \ + __ret_852 = __noswap_vusdotq_s32(__rev0_852, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_852, __p3_852)), __rev1_852); \ + __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \ + __ret_852; \ +}) #endif -__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +#define vsudot_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ + int32x2_t __ret_853; \ + int32x2_t __s0_853 = __p0_853; \ + int8x8_t __s1_853 = __p1_853; \ + uint8x8_t __s2_853 = __p2_853; \ +uint8x8_t __reint_853 = __s2_853; \ + __ret_853 = vusdot_s32(__s0_853, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_853, __p3_853)), __s1_853); \ + __ret_853; \ +}) +#else +#define vsudot_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ + int32x2_t __ret_854; \ + int32x2_t __s0_854 = __p0_854; \ + int8x8_t __s1_854 = __p1_854; \ + uint8x8_t __s2_854 = __p2_854; \ + int32x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ + int8x8_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_854 = __rev2_854; \ + __ret_854 = __noswap_vusdot_s32(__rev0_854, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_854, __p3_854)), __rev1_854); \ + __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ + __ret_854; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_lane_s32(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \ + int32x4_t __ret_855; \ + int32x4_t __s0_855 = __p0_855; \ + uint8x16_t __s1_855 = __p1_855; \ + int8x8_t __s2_855 = __p2_855; \ +int8x8_t __reint_855 = __s2_855; \ + __ret_855 = vusdotq_s32(__s0_855, __s1_855, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_855, __p3_855))); \ + __ret_855; \ +}) +#else +#define vusdotq_lane_s32(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ + int32x4_t __ret_856; \ + int32x4_t __s0_856 = __p0_856; \ + uint8x16_t __s1_856 = __p1_856; \ + int8x8_t __s2_856 = __p2_856; \ + int32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ + uint8x16_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_856 = __rev2_856; \ + __ret_856 = __noswap_vusdotq_s32(__rev0_856, __rev1_856, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_856, __p3_856))); \ + __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ + __ret_856; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ + 
int32x2_t __ret_857; \ + int32x2_t __s0_857 = __p0_857; \ + uint8x8_t __s1_857 = __p1_857; \ + int8x8_t __s2_857 = __p2_857; \ +int8x8_t __reint_857 = __s2_857; \ + __ret_857 = vusdot_s32(__s0_857, __s1_857, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_857, __p3_857))); \ + __ret_857; \ +}) +#else +#define vusdot_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ + int32x2_t __ret_858; \ + int32x2_t __s0_858 = __p0_858; \ + uint8x8_t __s1_858 = __p1_858; \ + int8x8_t __s2_858 = __p2_858; \ + int32x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ + uint8x8_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_858 = __rev2_858; \ + __ret_858 = __noswap_vusdot_s32(__rev0_858, __rev1_858, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_858, __p3_858))); \ + __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ + __ret_858; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 + vabdq_u8(__p1, __p2); return __ret; } #else -__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66669,13 +66829,13 @@ __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vabdq_u32(__p1, __p2); return __ret; } #else -__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66687,13 +66847,13 @@ __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vabdq_u16(__p1, __p2); return __ret; } #else -__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66705,13 +66865,13 @@ __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai 
__attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 + vabdq_s8(__p1, __p2); return __ret; } #else -__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66723,13 +66883,13 @@ __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 + vabdq_s32(__p1, __p2); return __ret; } #else -__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66741,13 +66901,13 @@ __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 + vabdq_s16(__p1, __p2); return __ret; } #else -__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66759,13 +66919,13 @@ __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 + vabd_u8(__p1, __p2); return __ret; } #else -__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66777,13 +66937,13 @@ __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 + vabd_u32(__p1, __p2); return __ret; } #else -__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66795,13 +66955,13 @@ __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = __p0 + vabd_u16(__p1, __p2); return __ret; } #else -__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66813,13 +66973,13 @@ __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; __ret = __p0 + vabd_s8(__p1, __p2); return __ret; } #else -__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66831,13 +66991,13 @@ __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 + vabd_s32(__p1, __p2); return __ret; } #else -__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66849,13 +67009,13 @@ __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 + vabd_s16(__p1, __p2); return __ret; } #else -__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66867,13 +67027,13 @@ __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); return __ret; } #else -__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) 
uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66881,7 +67041,7 @@ __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); return __ret; @@ -66889,13 +67049,13 @@ __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); return __ret; } #else -__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66903,7 +67063,7 @@ __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); return __ret; @@ -66911,13 +67071,13 @@ __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); return __ret; } #else -__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66925,7 +67085,7 @@ __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); return __ret; @@ -66933,13 +67093,13 @@ __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); return __ret; } #else -__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, 
int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66947,7 +67107,7 @@ __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); return __ret; @@ -66955,13 +67115,13 @@ __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); return __ret; } #else -__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66969,7 +67129,7 @@ __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); return __ret; @@ -66977,13 +67137,13 @@ __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); return __ret; } #else -__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66991,7 +67151,7 @@ __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); return __ret; @@ -66999,13 +67159,13 @@ __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = vmovl_u8(__p0) + vmovl_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); 
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67016,13 +67176,13 @@ __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = vmovl_u32(__p0) + vmovl_u32(__p1); return __ret; } #else -__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67033,13 +67193,13 @@ __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = vmovl_u16(__p0) + vmovl_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67050,13 +67210,13 @@ __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = vmovl_s8(__p0) + vmovl_s8(__p1); return __ret; } #else -__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67067,13 +67227,13 @@ __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = vmovl_s32(__p0) + vmovl_s32(__p1); return __ret; } #else -__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67084,13 +67244,13 @@ __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = vmovl_s16(__p0) + vmovl_s16(__p1); return __ret; } #else -__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67101,13 +67261,13 @@ __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { 
#endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = __p0 + vmovl_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67118,13 +67278,13 @@ __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = __p0 + vmovl_u32(__p1); return __ret; } #else -__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67135,13 +67295,13 @@ __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = __p0 + vmovl_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67152,13 +67312,13 @@ __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = __p0 + vmovl_s8(__p1); return __ret; } #else -__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67169,13 +67329,13 @@ __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = __p0 + vmovl_s32(__p1); return __ret; } #else -__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67186,13 +67346,13 @@ __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { 
int32x4_t __ret; __ret = __p0 + vmovl_s16(__p1); return __ret; } #else -__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67203,71 +67363,71 @@ __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \ - float16_t __ret_847; \ - float16x4_t __s0_847 = __p0_847; \ -float16x4_t __reint_847 = __s0_847; \ -int16_t __reint1_847 = vget_lane_s16(*(int16x4_t *) &__reint_847, __p1_847); \ - __ret_847 = *(float16_t *) &__reint1_847; \ - __ret_847; \ +#define vget_lane_f16(__p0_859, __p1_859) __extension__ ({ \ + float16_t __ret_859; \ + float16x4_t __s0_859 = __p0_859; \ +float16x4_t __reint_859 = __s0_859; \ +int16_t __reint1_859 = vget_lane_s16(*(int16x4_t *) &__reint_859, __p1_859); \ + __ret_859 = *(float16_t *) &__reint1_859; \ + __ret_859; \ }) #else -#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \ - float16_t __ret_848; \ - float16x4_t __s0_848 = __p0_848; \ - float16x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \ -float16x4_t __reint_848 = __rev0_848; \ -int16_t __reint1_848 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_848, __p1_848); \ - __ret_848 = *(float16_t *) &__reint1_848; \ - __ret_848; \ +#define vget_lane_f16(__p0_860, __p1_860) __extension__ ({ \ + float16_t __ret_860; \ + float16x4_t __s0_860 = __p0_860; \ + float16x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ +float16x4_t __reint_860 = __rev0_860; \ +int16_t __reint1_860 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_860, __p1_860); \ + __ret_860 = *(float16_t *) &__reint1_860; \ + __ret_860; \ }) -#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \ - float16_t __ret_849; \ - float16x4_t __s0_849 = __p0_849; \ -float16x4_t __reint_849 = __s0_849; \ -int16_t __reint1_849 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_849, __p1_849); \ - __ret_849 = *(float16_t *) &__reint1_849; \ - __ret_849; \ +#define __noswap_vget_lane_f16(__p0_861, __p1_861) __extension__ ({ \ + float16_t __ret_861; \ + float16x4_t __s0_861 = __p0_861; \ +float16x4_t __reint_861 = __s0_861; \ +int16_t __reint1_861 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_861, __p1_861); \ + __ret_861 = *(float16_t *) &__reint1_861; \ + __ret_861; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \ - float16_t __ret_850; \ - float16x8_t __s0_850 = __p0_850; \ -float16x8_t __reint_850 = __s0_850; \ -int16_t __reint1_850 = vgetq_lane_s16(*(int16x8_t *) &__reint_850, __p1_850); \ - __ret_850 = *(float16_t *) &__reint1_850; \ - __ret_850; \ +#define vgetq_lane_f16(__p0_862, __p1_862) __extension__ ({ \ + float16_t __ret_862; \ + float16x8_t __s0_862 = __p0_862; \ +float16x8_t __reint_862 = __s0_862; \ +int16_t __reint1_862 = vgetq_lane_s16(*(int16x8_t *) &__reint_862, __p1_862); \ + __ret_862 = *(float16_t *) &__reint1_862; \ + __ret_862; \ }) #else -#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \ - float16_t __ret_851; \ - float16x8_t __s0_851 = __p0_851; \ - float16x8_t __rev0_851; __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_851 = __rev0_851; \ -int16_t __reint1_851 = 
__noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_851, __p1_851); \ - __ret_851 = *(float16_t *) &__reint1_851; \ - __ret_851; \ +#define vgetq_lane_f16(__p0_863, __p1_863) __extension__ ({ \ + float16_t __ret_863; \ + float16x8_t __s0_863 = __p0_863; \ + float16x8_t __rev0_863; __rev0_863 = __builtin_shufflevector(__s0_863, __s0_863, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_863 = __rev0_863; \ +int16_t __reint1_863 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_863, __p1_863); \ + __ret_863 = *(float16_t *) &__reint1_863; \ + __ret_863; \ }) -#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \ - float16_t __ret_852; \ - float16x8_t __s0_852 = __p0_852; \ -float16x8_t __reint_852 = __s0_852; \ -int16_t __reint1_852 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_852, __p1_852); \ - __ret_852 = *(float16_t *) &__reint1_852; \ - __ret_852; \ +#define __noswap_vgetq_lane_f16(__p0_864, __p1_864) __extension__ ({ \ + float16_t __ret_864; \ + float16x8_t __s0_864 = __p0_864; \ +float16x8_t __reint_864 = __s0_864; \ +int16_t __reint1_864 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_864, __p1_864); \ + __ret_864 = *(float16_t *) &__reint1_864; \ + __ret_864; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vmull_u8(__p1, __p2); return __ret; } #else -__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67276,7 +67436,7 @@ __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __noswap_vmull_u8(__p1, __p2); return __ret; @@ -67284,13 +67444,13 @@ __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + vmull_u32(__p1, __p2); return __ret; } #else -__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67299,7 +67459,7 @@ __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vmull_u32(__p1, 
__p2); return __ret; @@ -67307,13 +67467,13 @@ __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vmull_u16(__p1, __p2); return __ret; } #else -__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67322,7 +67482,7 @@ __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vmull_u16(__p1, __p2); return __ret; @@ -67330,13 +67490,13 @@ __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + vmull_s8(__p1, __p2); return __ret; } #else -__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67345,7 +67505,7 @@ __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + __noswap_vmull_s8(__p1, __p2); return __ret; @@ -67353,13 +67513,13 @@ __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + vmull_s32(__p1, __p2); return __ret; } #else -__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67368,7 +67528,7 @@ __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_s32(int64x2_t __p0, 
int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vmull_s32(__p1, __p2); return __ret; @@ -67376,13 +67536,13 @@ __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + vmull_s16(__p1, __p2); return __ret; } #else -__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67391,7 +67551,7 @@ __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vmull_s16(__p1, __p2); return __ret; @@ -67399,109 +67559,109 @@ __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ - uint64x2_t __ret_853; \ - uint64x2_t __s0_853 = __p0_853; \ - uint32x2_t __s1_853 = __p1_853; \ - uint32x2_t __s2_853 = __p2_853; \ - __ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, __p3_853)); \ - __ret_853; \ +#define vmlal_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ + uint64x2_t __ret_865; \ + uint64x2_t __s0_865 = __p0_865; \ + uint32x2_t __s1_865 = __p1_865; \ + uint32x2_t __s2_865 = __p2_865; \ + __ret_865 = __s0_865 + vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \ + __ret_865; \ }) #else -#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ - uint64x2_t __ret_854; \ - uint64x2_t __s0_854 = __p0_854; \ - uint32x2_t __s1_854 = __p1_854; \ - uint32x2_t __s2_854 = __p2_854; \ - uint64x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ - uint32x2_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \ - uint32x2_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \ - __ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, __noswap_splat_lane_u32(__rev2_854, __p3_854)); \ - __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ - __ret_854; \ +#define vmlal_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ + uint64x2_t __ret_866; \ + uint64x2_t __s0_866 = __p0_866; \ + uint32x2_t __s1_866 = __p1_866; \ + uint32x2_t __s2_866 = __p2_866; \ + uint64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ + uint32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ + uint32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ + __ret_866 = __rev0_866 + __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \ + __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ + __ret_866; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, 
__p3_855) __extension__ ({ \ - uint32x4_t __ret_855; \ - uint32x4_t __s0_855 = __p0_855; \ - uint16x4_t __s1_855 = __p1_855; \ - uint16x4_t __s2_855 = __p2_855; \ - __ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \ - __ret_855; \ +#define vmlal_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ + uint32x4_t __ret_867; \ + uint32x4_t __s0_867 = __p0_867; \ + uint16x4_t __s1_867 = __p1_867; \ + uint16x4_t __s2_867 = __p2_867; \ + __ret_867 = __s0_867 + vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \ + __ret_867; \ }) #else -#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ - uint32x4_t __ret_856; \ - uint32x4_t __s0_856 = __p0_856; \ - uint16x4_t __s1_856 = __p1_856; \ - uint16x4_t __s2_856 = __p2_856; \ - uint32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ - uint16x4_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \ - uint16x4_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \ - __ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \ - __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ - __ret_856; \ +#define vmlal_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ + uint32x4_t __ret_868; \ + uint32x4_t __s0_868 = __p0_868; \ + uint16x4_t __s1_868 = __p1_868; \ + uint16x4_t __s2_868 = __p2_868; \ + uint32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ + uint16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ + uint16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ + __ret_868 = __rev0_868 + __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \ + __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ + __ret_868; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ - int64x2_t __ret_857; \ - int64x2_t __s0_857 = __p0_857; \ - int32x2_t __s1_857 = __p1_857; \ - int32x2_t __s2_857 = __p2_857; \ - __ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \ - __ret_857; \ +#define vmlal_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \ + int64x2_t __ret_869; \ + int64x2_t __s0_869 = __p0_869; \ + int32x2_t __s1_869 = __p1_869; \ + int32x2_t __s2_869 = __p2_869; \ + __ret_869 = __s0_869 + vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \ + __ret_869; \ }) #else -#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ - int64x2_t __ret_858; \ - int64x2_t __s0_858 = __p0_858; \ - int32x2_t __s1_858 = __p1_858; \ - int32x2_t __s2_858 = __p2_858; \ - int64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ - int32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \ - int32x2_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \ - __ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \ - __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ - __ret_858; \ +#define vmlal_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \ + int64x2_t __ret_870; \ + int64x2_t __s0_870 = __p0_870; \ + int32x2_t __s1_870 = __p1_870; \ + int32x2_t __s2_870 = __p2_870; \ 
+ int64x2_t __rev0_870; __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \ + int32x2_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \ + int32x2_t __rev2_870; __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \ + __ret_870 = __rev0_870 + __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \ + __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \ + __ret_870; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \ - int32x4_t __ret_859; \ - int32x4_t __s0_859 = __p0_859; \ - int16x4_t __s1_859 = __p1_859; \ - int16x4_t __s2_859 = __p2_859; \ - __ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \ - __ret_859; \ +#define vmlal_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \ + int32x4_t __ret_871; \ + int32x4_t __s0_871 = __p0_871; \ + int16x4_t __s1_871 = __p1_871; \ + int16x4_t __s2_871 = __p2_871; \ + __ret_871 = __s0_871 + vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \ + __ret_871; \ }) #else -#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \ - int32x4_t __ret_860; \ - int32x4_t __s0_860 = __p0_860; \ - int16x4_t __s1_860 = __p1_860; \ - int16x4_t __s2_860 = __p2_860; \ - int32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ - int16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \ - int16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \ - __ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \ - __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \ - __ret_860; \ +#define vmlal_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \ + int32x4_t __ret_872; \ + int32x4_t __s0_872 = __p0_872; \ + int16x4_t __s1_872 = __p1_872; \ + int16x4_t __s2_872 = __p2_872; \ + int32x4_t __rev0_872; __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \ + int16x4_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \ + int16x4_t __rev2_872; __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \ + __ret_872 = __rev0_872 + __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \ + __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \ + __ret_872; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #else -__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67509,7 +67669,7 @@ __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, 
uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; @@ -67517,13 +67677,13 @@ __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67531,7 +67691,7 @@ __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67539,13 +67699,13 @@ __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67553,7 +67713,7 @@ __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; @@ -67561,13 +67721,13 @@ __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67575,7 +67735,7 @@ __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67583,13 +67743,13 @@ __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 - vmull_u8(__p1, __p2); return __ret; } #else -__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67598,7 +67758,7 @@ __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 - __noswap_vmull_u8(__p1, __p2); return __ret; @@ -67606,13 +67766,13 @@ __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 - vmull_u32(__p1, __p2); return __ret; } #else -__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67621,7 +67781,7 @@ __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 - __noswap_vmull_u32(__p1, __p2); return __ret; @@ -67629,13 +67789,13 @@ __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 - vmull_u16(__p1, __p2); return __ret; } #else -__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 
0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67644,7 +67804,7 @@ __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 - __noswap_vmull_u16(__p1, __p2); return __ret; @@ -67652,13 +67812,13 @@ __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 - vmull_s8(__p1, __p2); return __ret; } #else -__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67667,7 +67827,7 @@ __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 - __noswap_vmull_s8(__p1, __p2); return __ret; @@ -67675,13 +67835,13 @@ __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 - vmull_s32(__p1, __p2); return __ret; } #else -__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67690,7 +67850,7 @@ __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 - __noswap_vmull_s32(__p1, __p2); return __ret; @@ -67698,13 +67858,13 @@ __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 - vmull_s16(__p1, __p2); return __ret; } #else -__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t 
__ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67713,7 +67873,7 @@ __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 - __noswap_vmull_s16(__p1, __p2); return __ret; @@ -67721,109 +67881,109 @@ __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ - uint64x2_t __ret_861; \ - uint64x2_t __s0_861 = __p0_861; \ - uint32x2_t __s1_861 = __p1_861; \ - uint32x2_t __s2_861 = __p2_861; \ - __ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \ - __ret_861; \ +#define vmlsl_lane_u32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ + uint64x2_t __ret_873; \ + uint64x2_t __s0_873 = __p0_873; \ + uint32x2_t __s1_873 = __p1_873; \ + uint32x2_t __s2_873 = __p2_873; \ + __ret_873 = __s0_873 - vmull_u32(__s1_873, splat_lane_u32(__s2_873, __p3_873)); \ + __ret_873; \ }) #else -#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ - uint64x2_t __ret_862; \ - uint64x2_t __s0_862 = __p0_862; \ - uint32x2_t __s1_862 = __p1_862; \ - uint32x2_t __s2_862 = __p2_862; \ - uint64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \ - uint32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \ - uint32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \ - __ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \ - __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \ - __ret_862; \ +#define vmlsl_lane_u32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ + uint64x2_t __ret_874; \ + uint64x2_t __s0_874 = __p0_874; \ + uint32x2_t __s1_874 = __p1_874; \ + uint32x2_t __s2_874 = __p2_874; \ + uint64x2_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 1, 0); \ + uint32x2_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 1, 0); \ + uint32x2_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 1, 0); \ + __ret_874 = __rev0_874 - __noswap_vmull_u32(__rev1_874, __noswap_splat_lane_u32(__rev2_874, __p3_874)); \ + __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 1, 0); \ + __ret_874; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ - uint32x4_t __ret_863; \ - uint32x4_t __s0_863 = __p0_863; \ - uint16x4_t __s1_863 = __p1_863; \ - uint16x4_t __s2_863 = __p2_863; \ - __ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \ - __ret_863; \ +#define vmlsl_lane_u16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ + uint32x4_t __ret_875; \ + uint32x4_t __s0_875 = __p0_875; \ + uint16x4_t __s1_875 = __p1_875; \ + uint16x4_t __s2_875 = __p2_875; \ + __ret_875 = __s0_875 - vmull_u16(__s1_875, splat_lane_u16(__s2_875, __p3_875)); \ + __ret_875; \ }) #else -#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ - uint32x4_t __ret_864; \ - 
uint32x4_t __s0_864 = __p0_864; \ - uint16x4_t __s1_864 = __p1_864; \ - uint16x4_t __s2_864 = __p2_864; \ - uint32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ - uint16x4_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \ - uint16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \ - __ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \ - __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ - __ret_864; \ +#define vmlsl_lane_u16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ + uint32x4_t __ret_876; \ + uint32x4_t __s0_876 = __p0_876; \ + uint16x4_t __s1_876 = __p1_876; \ + uint16x4_t __s2_876 = __p2_876; \ + uint32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ + uint16x4_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \ + uint16x4_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \ + __ret_876 = __rev0_876 - __noswap_vmull_u16(__rev1_876, __noswap_splat_lane_u16(__rev2_876, __p3_876)); \ + __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ + __ret_876; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ - int64x2_t __ret_865; \ - int64x2_t __s0_865 = __p0_865; \ - int32x2_t __s1_865 = __p1_865; \ - int32x2_t __s2_865 = __p2_865; \ - __ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \ - __ret_865; \ +#define vmlsl_lane_s32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ + int64x2_t __ret_877; \ + int64x2_t __s0_877 = __p0_877; \ + int32x2_t __s1_877 = __p1_877; \ + int32x2_t __s2_877 = __p2_877; \ + __ret_877 = __s0_877 - vmull_s32(__s1_877, splat_lane_s32(__s2_877, __p3_877)); \ + __ret_877; \ }) #else -#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ - int64x2_t __ret_866; \ - int64x2_t __s0_866 = __p0_866; \ - int32x2_t __s1_866 = __p1_866; \ - int32x2_t __s2_866 = __p2_866; \ - int64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ - int32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ - int32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ - __ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \ - __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ - __ret_866; \ +#define vmlsl_lane_s32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ + int64x2_t __ret_878; \ + int64x2_t __s0_878 = __p0_878; \ + int32x2_t __s1_878 = __p1_878; \ + int32x2_t __s2_878 = __p2_878; \ + int64x2_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 1, 0); \ + int32x2_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 1, 0); \ + int32x2_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 1, 0); \ + __ret_878 = __rev0_878 - __noswap_vmull_s32(__rev1_878, __noswap_splat_lane_s32(__rev2_878, __p3_878)); \ + __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 1, 0); \ + __ret_878; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ - int32x4_t __ret_867; \ - int32x4_t __s0_867 = __p0_867; \ - int16x4_t __s1_867 = __p1_867; \ - int16x4_t __s2_867 = 
__p2_867; \ - __ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \ - __ret_867; \ +#define vmlsl_lane_s16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ + int32x4_t __ret_879; \ + int32x4_t __s0_879 = __p0_879; \ + int16x4_t __s1_879 = __p1_879; \ + int16x4_t __s2_879 = __p2_879; \ + __ret_879 = __s0_879 - vmull_s16(__s1_879, splat_lane_s16(__s2_879, __p3_879)); \ + __ret_879; \ }) #else -#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ - int32x4_t __ret_868; \ - int32x4_t __s0_868 = __p0_868; \ - int16x4_t __s1_868 = __p1_868; \ - int16x4_t __s2_868 = __p2_868; \ - int32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ - int16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ - int16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ - __ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \ - __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ - __ret_868; \ +#define vmlsl_lane_s16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ + int32x4_t __ret_880; \ + int32x4_t __s0_880 = __p0_880; \ + int16x4_t __s1_880 = __p1_880; \ + int16x4_t __s2_880 = __p2_880; \ + int32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ + int16x4_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \ + int16x4_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 3, 2, 1, 0); \ + __ret_880 = __rev0_880 - __noswap_vmull_s16(__rev1_880, __noswap_splat_lane_s16(__rev2_880, __p3_880)); \ + __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ + __ret_880; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #else -__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67831,7 +67991,7 @@ __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; @@ -67839,13 +67999,13 @@ __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) 
uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67853,7 +68013,7 @@ __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67861,13 +68021,13 @@ __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67875,7 +68035,7 @@ __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; @@ -67883,13 +68043,13 @@ __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67897,7 +68057,7 @@ __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67905,246 +68065,504 @@ __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \ - float16x4_t __ret_869; \ - float16_t __s0_869 = __p0_869; \ - float16x4_t __s1_869 = __p1_869; 
\ -float16_t __reint_869 = __s0_869; \ -float16x4_t __reint1_869 = __s1_869; \ -int16x4_t __reint2_869 = vset_lane_s16(*(int16_t *) &__reint_869, *(int16x4_t *) &__reint1_869, __p2_869); \ - __ret_869 = *(float16x4_t *) &__reint2_869; \ - __ret_869; \ -}) -#else -#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \ - float16x4_t __ret_870; \ - float16_t __s0_870 = __p0_870; \ - float16x4_t __s1_870 = __p1_870; \ - float16x4_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 3, 2, 1, 0); \ -float16_t __reint_870 = __s0_870; \ -float16x4_t __reint1_870 = __rev1_870; \ -int16x4_t __reint2_870 = __noswap_vset_lane_s16(*(int16_t *) &__reint_870, *(int16x4_t *) &__reint1_870, __p2_870); \ - __ret_870 = *(float16x4_t *) &__reint2_870; \ - __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \ - __ret_870; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \ - float16x8_t __ret_871; \ - float16_t __s0_871 = __p0_871; \ - float16x8_t __s1_871 = __p1_871; \ -float16_t __reint_871 = __s0_871; \ -float16x8_t __reint1_871 = __s1_871; \ -int16x8_t __reint2_871 = vsetq_lane_s16(*(int16_t *) &__reint_871, *(int16x8_t *) &__reint1_871, __p2_871); \ - __ret_871 = *(float16x8_t *) &__reint2_871; \ - __ret_871; \ -}) -#else -#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \ - float16x8_t __ret_872; \ - float16_t __s0_872 = __p0_872; \ - float16x8_t __s1_872 = __p1_872; \ - float16x8_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16_t __reint_872 = __s0_872; \ -float16x8_t __reint1_872 = __rev1_872; \ -int16x8_t __reint2_872 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_872, *(int16x8_t *) &__reint1_872, __p2_872); \ - __ret_872 = *(float16x8_t *) &__reint2_872; \ - __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_872; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfmlalbq_lane_f32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ - float32x4_t __ret_873; \ - float32x4_t __s0_873 = __p0_873; \ - bfloat16x8_t __s1_873 = __p1_873; \ - bfloat16x4_t __s2_873 = __p2_873; \ - __ret_873 = vbfmlalbq_f32(__s0_873, __s1_873, (bfloat16x8_t) {vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873)}); \ - __ret_873; \ -}) -#else -#define vbfmlalbq_lane_f32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ - float32x4_t __ret_874; \ - float32x4_t __s0_874 = __p0_874; \ - bfloat16x8_t __s1_874 = __p1_874; \ - bfloat16x4_t __s2_874 = __p2_874; \ - float32x4_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \ - __ret_874 = __noswap_vbfmlalbq_f32(__rev0_874, __rev1_874, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, 
__p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874)}); \ - __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \ - __ret_874; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfmlalbq_laneq_f32(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ - float32x4_t __ret_875; \ - float32x4_t __s0_875 = __p0_875; \ - bfloat16x8_t __s1_875 = __p1_875; \ - bfloat16x8_t __s2_875 = __p2_875; \ - __ret_875 = vbfmlalbq_f32(__s0_875, __s1_875, (bfloat16x8_t) {vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875)}); \ - __ret_875; \ -}) -#else -#define vbfmlalbq_laneq_f32(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ - float32x4_t __ret_876; \ - float32x4_t __s0_876 = __p0_876; \ - bfloat16x8_t __s1_876 = __p1_876; \ - bfloat16x8_t __s2_876 = __p2_876; \ - float32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_876 = __noswap_vbfmlalbq_f32(__rev0_876, __rev1_876, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876)}); \ - __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ - __ret_876; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfmlaltq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ - float32x4_t __ret_877; \ - float32x4_t __s0_877 = __p0_877; \ - bfloat16x8_t __s1_877 = __p1_877; \ - bfloat16x4_t __s2_877 = __p2_877; \ - __ret_877 = vbfmlaltq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \ - __ret_877; \ -}) -#else -#define vbfmlaltq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ - float32x4_t __ret_878; \ - float32x4_t __s0_878 = __p0_878; \ - bfloat16x8_t __s1_878 = __p1_878; \ - bfloat16x4_t __s2_878 = __p2_878; \ - float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \ - __ret_878 = __noswap_vbfmlaltq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \ 
- __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \ - __ret_878; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfmlaltq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ - float32x4_t __ret_879; \ - float32x4_t __s0_879 = __p0_879; \ - bfloat16x8_t __s1_879 = __p1_879; \ - bfloat16x8_t __s2_879 = __p2_879; \ - __ret_879 = vbfmlaltq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \ - __ret_879; \ -}) -#else -#define vbfmlaltq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ - float32x4_t __ret_880; \ - float32x4_t __s0_880 = __p0_880; \ - bfloat16x8_t __s1_880 = __p1_880; \ - bfloat16x8_t __s2_880 = __p2_880; \ - float32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_880 = __noswap_vbfmlaltq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \ - __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ - __ret_880; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsudotq_lane_s32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ - int32x4_t __ret_881; \ - int32x4_t __s0_881 = __p0_881; \ - int8x16_t __s1_881 = __p1_881; \ - uint8x8_t __s2_881 = __p2_881; \ -uint8x8_t __reint_881 = __s2_881; \ - __ret_881 = vusdotq_s32(__s0_881, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_881, __p3_881)), __s1_881); \ +#define vset_lane_f16(__p0_881, __p1_881, __p2_881) __extension__ ({ \ + float16x4_t __ret_881; \ + float16_t __s0_881 = __p0_881; \ + float16x4_t 
__s1_881 = __p1_881; \ +float16_t __reint_881 = __s0_881; \ +float16x4_t __reint1_881 = __s1_881; \ +int16x4_t __reint2_881 = vset_lane_s16(*(int16_t *) &__reint_881, *(int16x4_t *) &__reint1_881, __p2_881); \ + __ret_881 = *(float16x4_t *) &__reint2_881; \ __ret_881; \ }) #else -#define vsudotq_lane_s32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ - int32x4_t __ret_882; \ - int32x4_t __s0_882 = __p0_882; \ - int8x16_t __s1_882 = __p1_882; \ - uint8x8_t __s2_882 = __p2_882; \ - int32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ - int8x16_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_882 = __rev2_882; \ - __ret_882 = __noswap_vusdotq_s32(__rev0_882, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_882, __p3_882)), __rev1_882); \ +#define vset_lane_f16(__p0_882, __p1_882, __p2_882) __extension__ ({ \ + float16x4_t __ret_882; \ + float16_t __s0_882 = __p0_882; \ + float16x4_t __s1_882 = __p1_882; \ + float16x4_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 3, 2, 1, 0); \ +float16_t __reint_882 = __s0_882; \ +float16x4_t __reint1_882 = __rev1_882; \ +int16x4_t __reint2_882 = __noswap_vset_lane_s16(*(int16_t *) &__reint_882, *(int16x4_t *) &__reint1_882, __p2_882); \ + __ret_882 = *(float16x4_t *) &__reint2_882; \ __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ __ret_882; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsudot_lane_s32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \ - int32x2_t __ret_883; \ - int32x2_t __s0_883 = __p0_883; \ - int8x8_t __s1_883 = __p1_883; \ - uint8x8_t __s2_883 = __p2_883; \ -uint8x8_t __reint_883 = __s2_883; \ - __ret_883 = vusdot_s32(__s0_883, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_883, __p3_883)), __s1_883); \ +#define vsetq_lane_f16(__p0_883, __p1_883, __p2_883) __extension__ ({ \ + float16x8_t __ret_883; \ + float16_t __s0_883 = __p0_883; \ + float16x8_t __s1_883 = __p1_883; \ +float16_t __reint_883 = __s0_883; \ +float16x8_t __reint1_883 = __s1_883; \ +int16x8_t __reint2_883 = vsetq_lane_s16(*(int16_t *) &__reint_883, *(int16x8_t *) &__reint1_883, __p2_883); \ + __ret_883 = *(float16x8_t *) &__reint2_883; \ __ret_883; \ }) #else -#define vsudot_lane_s32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ - int32x2_t __ret_884; \ - int32x2_t __s0_884 = __p0_884; \ - int8x8_t __s1_884 = __p1_884; \ - uint8x8_t __s2_884 = __p2_884; \ - int32x2_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \ - int8x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_884 = __rev2_884; \ - __ret_884 = __noswap_vusdot_s32(__rev0_884, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_884, __p3_884)), __rev1_884); \ - __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \ +#define vsetq_lane_f16(__p0_884, __p1_884, __p2_884) __extension__ ({ \ + float16x8_t __ret_884; \ + float16_t __s0_884 = __p0_884; \ + float16x8_t __s1_884 = __p1_884; \ + float16x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16_t __reint_884 = __s0_884; \ +float16x8_t 
__reint1_884 = __rev1_884; \ +int16x8_t __reint2_884 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_884, *(int16x8_t *) &__reint1_884, __p2_884); \ + __ret_884 = *(float16x8_t *) &__reint2_884; \ + __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_884; \ }) #endif -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ + float32x4_t __ret_885; \ + float32x4_t __s0_885 = __p0_885; \ + float16x8_t __s1_885 = __p1_885; \ + float16x4_t __s2_885 = __p2_885; \ + __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \ + __ret_885; \ +}) +#else +#define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ + float32x4_t __ret_886; \ + float32x4_t __s0_886 = __p0_886; \ + float16x8_t __s1_886 = __p1_886; \ + float16x4_t __s2_886 = __p2_886; \ + float32x4_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \ + float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \ + __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \ + __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \ + __ret_886; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ + float32x2_t __ret_887; \ + float32x2_t __s0_887 = __p0_887; \ + float16x4_t __s1_887 = __p1_887; \ + float16x4_t __s2_887 = __p2_887; \ + __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \ + __ret_887; \ +}) +#else +#define vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ + float32x2_t __ret_888; \ + float32x2_t __s0_888 = __p0_888; \ + float16x4_t __s1_888 = __p1_888; \ + float16x4_t __s2_888 = __p2_888; \ + 
float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ + float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \ + float16x4_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \ + __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \ + __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ + __ret_888; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ + float32x4_t __ret_889; \ + float32x4_t __s0_889 = __p0_889; \ + float16x8_t __s1_889 = __p1_889; \ + float16x4_t __s2_889 = __p2_889; \ + __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \ + __ret_889; \ +}) +#else +#define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ + float32x4_t __ret_890; \ + float32x4_t __s0_890 = __p0_890; \ + float16x8_t __s1_890 = __p1_890; \ + float16x4_t __s2_890 = __p2_890; \ + float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \ + float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \ + __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \ + __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \ + __ret_890; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ + float32x2_t __ret_891; \ + float32x2_t __s0_891 = __p0_891; \ + float16x4_t __s1_891 = __p1_891; \ + float16x4_t __s2_891 = __p2_891; \ + __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \ + __ret_891; \ +}) +#else +#define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ + float32x2_t __ret_892; \ + float32x2_t __s0_892 = __p0_892; \ + float16x4_t __s1_892 = __p1_892; \ + float16x4_t __s2_892 = __p2_892; \ + float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ + float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \ + float16x4_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \ + __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), 
__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \ + __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ + __ret_892; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ + float32x4_t __ret_893; \ + float32x4_t __s0_893 = __p0_893; \ + float16x8_t __s1_893 = __p1_893; \ + float16x8_t __s2_893 = __p2_893; \ + __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \ + __ret_893; \ +}) +#else +#define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ + float32x4_t __ret_894; \ + float32x4_t __s0_894 = __p0_894; \ + float16x8_t __s1_894 = __p1_894; \ + float16x8_t __s2_894 = __p2_894; \ + float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \ + float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \ + __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \ + __ret_894; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ + float32x2_t __ret_895; \ + float32x2_t __s0_895 = __p0_895; \ + float16x4_t __s1_895 = __p1_895; \ + float16x8_t __s2_895 = __p2_895; \ + __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \ + __ret_895; \ +}) +#else +#define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ + float32x2_t __ret_896; \ + float32x2_t __s0_896 = __p0_896; \ + float16x4_t __s1_896 = __p1_896; \ + float16x8_t __s2_896 = __p2_896; \ + float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \ + float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \ + float16x8_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \ + __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \ + __ret_896; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ + float32x4_t __ret_897; \ + float32x4_t __s0_897 = __p0_897; \ + float16x8_t __s1_897 = __p1_897; \ + 
float16x8_t __s2_897 = __p2_897; \ + __ret_897 = vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \ + __ret_897; \ +}) +#else +#define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ + float32x4_t __ret_898; \ + float32x4_t __s0_898 = __p0_898; \ + float16x8_t __s1_898 = __p1_898; \ + float16x8_t __s2_898 = __p2_898; \ + float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \ + float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \ + __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \ + __ret_898; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \ + float32x2_t __ret_899; \ + float32x2_t __s0_899 = __p0_899; \ + float16x4_t __s1_899 = __p1_899; \ + float16x8_t __s2_899 = __p2_899; \ + __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \ + __ret_899; \ +}) +#else +#define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \ + float32x2_t __ret_900; \ + float32x2_t __s0_900 = __p0_900; \ + float16x4_t __s1_900 = __p1_900; \ + float16x8_t __s2_900 = __p2_900; \ + float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \ + float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \ + float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \ + __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \ + __ret_900; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_high_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \ + float32x4_t __ret_901; \ + float32x4_t __s0_901 = __p0_901; \ + float16x8_t __s1_901 = __p1_901; \ + float16x4_t __s2_901 = __p2_901; \ + __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \ + __ret_901; \ +}) +#else +#define 
vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ + float32x4_t __ret_902; \ + float32x4_t __s0_902 = __p0_902; \ + float16x8_t __s1_902 = __p1_902; \ + float16x4_t __s2_902 = __p2_902; \ + float32x4_t __rev0_902; __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \ + float16x8_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_902; __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \ + __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \ + __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \ + __ret_902; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ + float32x2_t __ret_903; \ + float32x2_t __s0_903 = __p0_903; \ + float16x4_t __s1_903 = __p1_903; \ + float16x4_t __s2_903 = __p2_903; \ + __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \ + __ret_903; \ +}) +#else +#define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ + float32x2_t __ret_904; \ + float32x2_t __s0_904 = __p0_904; \ + float16x4_t __s1_904 = __p1_904; \ + float16x4_t __s2_904 = __p2_904; \ + float32x2_t __rev0_904; __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \ + float16x4_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \ + float16x4_t __rev2_904; __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \ + __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \ + __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \ + __ret_904; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ + float32x4_t __ret_905; \ + float32x4_t __s0_905 = __p0_905; \ + float16x8_t __s1_905 = __p1_905; \ + float16x4_t __s2_905 = __p2_905; \ + __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \ + __ret_905; \ +}) +#else +#define vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ + float32x4_t __ret_906; \ + float32x4_t __s0_906 = __p0_906; \ + float16x8_t __s1_906 = __p1_906; \ + float16x4_t __s2_906 = __p2_906; \ + float32x4_t __rev0_906; __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \ + float16x8_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_906; __rev2_906 = 
__builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); \ + __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \ + __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \ + __ret_906; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ + float32x2_t __ret_907; \ + float32x2_t __s0_907 = __p0_907; \ + float16x4_t __s1_907 = __p1_907; \ + float16x4_t __s2_907 = __p2_907; \ + __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \ + __ret_907; \ +}) +#else +#define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ + float32x2_t __ret_908; \ + float32x2_t __s0_908 = __p0_908; \ + float16x4_t __s1_908 = __p1_908; \ + float16x4_t __s2_908 = __p2_908; \ + float32x2_t __rev0_908; __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \ + float16x4_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \ + float16x4_t __rev2_908; __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \ + __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \ + __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \ + __ret_908; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ + float32x4_t __ret_909; \ + float32x4_t __s0_909 = __p0_909; \ + float16x8_t __s1_909 = __p1_909; \ + float16x8_t __s2_909 = __p2_909; \ + __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \ + __ret_909; \ +}) +#else +#define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ + float32x4_t __ret_910; \ + float32x4_t __s0_910 = __p0_910; \ + float16x8_t __s1_910 = __p1_910; \ + float16x8_t __s2_910 = __p2_910; \ + float32x4_t __rev0_910; __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \ + float16x8_t __rev1_910; __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_910; __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_910 = __noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), 
__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \ + __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \ + __ret_910; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ + float32x2_t __ret_911; \ + float32x2_t __s0_911 = __p0_911; \ + float16x4_t __s1_911 = __p1_911; \ + float16x8_t __s2_911 = __p2_911; \ + __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \ + __ret_911; \ +}) +#else +#define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ + float32x2_t __ret_912; \ + float32x2_t __s0_912 = __p0_912; \ + float16x4_t __s1_912 = __p1_912; \ + float16x8_t __s2_912 = __p2_912; \ + float32x2_t __rev0_912; __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \ + float16x4_t __rev1_912; __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \ + float16x8_t __rev2_912; __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \ + __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \ + __ret_912; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ + float32x4_t __ret_913; \ + float32x4_t __s0_913 = __p0_913; \ + float16x8_t __s1_913 = __p1_913; \ + float16x8_t __s2_913 = __p2_913; \ + __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \ + __ret_913; \ +}) +#else +#define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ + float32x4_t __ret_914; \ + float32x4_t __s0_914 = __p0_914; \ + float16x8_t __s1_914 = __p1_914; \ + float16x8_t __s2_914 = __p2_914; \ + float32x4_t __rev0_914; __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \ + float16x8_t __rev1_914; __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_914; __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \ + __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 3, 2, 1, 0); \ + __ret_914; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ + float32x2_t __ret_915; \ + float32x2_t __s0_915 = __p0_915; \ + float16x4_t __s1_915 = __p1_915; \ + 
float16x8_t __s2_915 = __p2_915; \ + __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, (float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \ + __ret_915; \ +}) +#else +#define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ + float32x2_t __ret_916; \ + float32x2_t __s0_916 = __p0_916; \ + float16x4_t __s1_916 = __p1_916; \ + float16x8_t __s2_916 = __p2_916; \ + float32x2_t __rev0_916; __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \ + float16x4_t __rev1_916; __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \ + float16x8_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \ + __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \ + __ret_916; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \ + float16_t __ret_917; \ + float16_t __s0_917 = __p0_917; \ + float16x4_t __s1_917 = __p1_917; \ + __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \ + __ret_917; \ +}) +#else +#define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \ + float16_t __ret_918; \ + float16_t __s0_918 = __p0_918; \ + float16x4_t __s1_918 = __p1_918; \ + float16x4_t __rev1_918; __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \ + __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \ + __ret_918; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \ + float16_t __ret_919; \ + float16_t __s0_919 = __p0_919; \ + float16x8_t __s1_919 = __p1_919; \ + __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \ + __ret_919; \ +}) +#else +#define vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \ + float16_t __ret_920; \ + float16_t __s0_920 = __p0_920; \ + float16x8_t __s1_920 = __p1_920; \ + float16x8_t __rev1_920; __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \ + __ret_920; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); return __ret; } #else -__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68155,13 +68573,13 @@ __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); return __ret; } #else -__ai uint64x2_t 
vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68172,13 +68590,13 @@ __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); return __ret; } #else -__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68189,13 +68607,13 @@ __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); return __ret; } #else -__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68206,13 +68624,13 @@ __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else -__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68223,13 +68641,13 @@ __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else -__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68240,13 +68658,13 @@ __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai 
__attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68257,13 +68675,13 @@ __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68274,13 +68692,13 @@ __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68291,13 +68709,13 @@ __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); return __ret; } #else -__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68308,13 +68726,13 @@ __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68325,13 +68743,13 @@ __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); return __ret; } #else -__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68342,13 +68760,13 @@ __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = __p0 + vmovl_high_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68359,13 +68777,13 @@ __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = __p0 + vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68376,13 +68794,13 @@ __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = __p0 + vmovl_high_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68393,13 +68811,13 @@ __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = __p0 + vmovl_high_s8(__p1); return __ret; } #else -__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68410,13 +68828,13 @@ __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = __p0 + vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68427,13 +68845,13 @@ __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = __p0 + vmovl_high_s16(__p1); return __ret; } #else -__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68444,147 +68862,147 @@ __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p64(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ - poly64x2_t __ret_885; \ - poly64x2_t __s0_885 = __p0_885; \ - poly64x1_t __s2_885 = __p2_885; \ - __ret_885 = vsetq_lane_p64(vget_lane_p64(__s2_885, __p3_885), __s0_885, __p1_885); \ - __ret_885; \ +#define vcopyq_lane_p64(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ + poly64x2_t __ret_921; \ + poly64x2_t __s0_921 = __p0_921; \ + poly64x1_t __s2_921 = __p2_921; \ + __ret_921 = vsetq_lane_p64(vget_lane_p64(__s2_921, __p3_921), __s0_921, __p1_921); \ + __ret_921; \ }) #else -#define vcopyq_lane_p64(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ - poly64x2_t __ret_886; \ - poly64x2_t __s0_886 = __p0_886; \ - poly64x1_t __s2_886 = __p2_886; \ - poly64x2_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 1, 0); \ - __ret_886 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_886, __p3_886), __rev0_886, __p1_886); \ - __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 1, 0); \ - __ret_886; \ +#define vcopyq_lane_p64(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ + poly64x2_t __ret_922; \ + poly64x2_t __s0_922 = __p0_922; \ + poly64x1_t __s2_922 = __p2_922; \ + poly64x2_t __rev0_922; __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 1, 0); \ + __ret_922 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_922, __p3_922), __rev0_922, __p1_922); \ + __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 1, 0); \ + __ret_922; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f64(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ - float64x2_t __ret_887; \ - float64x2_t __s0_887 = __p0_887; \ - float64x1_t __s2_887 = __p2_887; \ - __ret_887 = vsetq_lane_f64(vget_lane_f64(__s2_887, __p3_887), __s0_887, __p1_887); \ - __ret_887; \ +#define vcopyq_lane_f64(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ + 
float64x2_t __ret_923; \ + float64x2_t __s0_923 = __p0_923; \ + float64x1_t __s2_923 = __p2_923; \ + __ret_923 = vsetq_lane_f64(vget_lane_f64(__s2_923, __p3_923), __s0_923, __p1_923); \ + __ret_923; \ }) #else -#define vcopyq_lane_f64(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ - float64x2_t __ret_888; \ - float64x2_t __s0_888 = __p0_888; \ - float64x1_t __s2_888 = __p2_888; \ - float64x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ - __ret_888 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_888, __p3_888), __rev0_888, __p1_888); \ - __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ - __ret_888; \ +#define vcopyq_lane_f64(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ + float64x2_t __ret_924; \ + float64x2_t __s0_924 = __p0_924; \ + float64x1_t __s2_924 = __p2_924; \ + float64x2_t __rev0_924; __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \ + __ret_924 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_924, __p3_924), __rev0_924, __p1_924); \ + __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \ + __ret_924; \ }) #endif -#define vcopy_lane_p64(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ - poly64x1_t __ret_889; \ - poly64x1_t __s0_889 = __p0_889; \ - poly64x1_t __s2_889 = __p2_889; \ - __ret_889 = vset_lane_p64(vget_lane_p64(__s2_889, __p3_889), __s0_889, __p1_889); \ - __ret_889; \ +#define vcopy_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ + poly64x1_t __ret_925; \ + poly64x1_t __s0_925 = __p0_925; \ + poly64x1_t __s2_925 = __p2_925; \ + __ret_925 = vset_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \ + __ret_925; \ }) -#define vcopy_lane_f64(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ - float64x1_t __ret_890; \ - float64x1_t __s0_890 = __p0_890; \ - float64x1_t __s2_890 = __p2_890; \ - __ret_890 = vset_lane_f64(vget_lane_f64(__s2_890, __p3_890), __s0_890, __p1_890); \ - __ret_890; \ +#define vcopy_lane_f64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ + float64x1_t __ret_926; \ + float64x1_t __s0_926 = __p0_926; \ + float64x1_t __s2_926 = __p2_926; \ + __ret_926 = vset_lane_f64(vget_lane_f64(__s2_926, __p3_926), __s0_926, __p1_926); \ + __ret_926; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p64(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ - poly64x2_t __ret_891; \ - poly64x2_t __s0_891 = __p0_891; \ - poly64x2_t __s2_891 = __p2_891; \ - __ret_891 = vsetq_lane_p64(vgetq_lane_p64(__s2_891, __p3_891), __s0_891, __p1_891); \ - __ret_891; \ +#define vcopyq_laneq_p64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ + poly64x2_t __ret_927; \ + poly64x2_t __s0_927 = __p0_927; \ + poly64x2_t __s2_927 = __p2_927; \ + __ret_927 = vsetq_lane_p64(vgetq_lane_p64(__s2_927, __p3_927), __s0_927, __p1_927); \ + __ret_927; \ }) #else -#define vcopyq_laneq_p64(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ - poly64x2_t __ret_892; \ - poly64x2_t __s0_892 = __p0_892; \ - poly64x2_t __s2_892 = __p2_892; \ - poly64x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ - poly64x2_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 1, 0); \ - __ret_892 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_892, __p3_892), __rev0_892, __p1_892); \ - __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ - __ret_892; \ +#define vcopyq_laneq_p64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ + poly64x2_t __ret_928; \ 
+ poly64x2_t __s0_928 = __p0_928; \ + poly64x2_t __s2_928 = __p2_928; \ + poly64x2_t __rev0_928; __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \ + poly64x2_t __rev2_928; __rev2_928 = __builtin_shufflevector(__s2_928, __s2_928, 1, 0); \ + __ret_928 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_928, __p3_928), __rev0_928, __p1_928); \ + __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \ + __ret_928; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f64(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ - float64x2_t __ret_893; \ - float64x2_t __s0_893 = __p0_893; \ - float64x2_t __s2_893 = __p2_893; \ - __ret_893 = vsetq_lane_f64(vgetq_lane_f64(__s2_893, __p3_893), __s0_893, __p1_893); \ - __ret_893; \ +#define vcopyq_laneq_f64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ + float64x2_t __ret_929; \ + float64x2_t __s0_929 = __p0_929; \ + float64x2_t __s2_929 = __p2_929; \ + __ret_929 = vsetq_lane_f64(vgetq_lane_f64(__s2_929, __p3_929), __s0_929, __p1_929); \ + __ret_929; \ }) #else -#define vcopyq_laneq_f64(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ - float64x2_t __ret_894; \ - float64x2_t __s0_894 = __p0_894; \ - float64x2_t __s2_894 = __p2_894; \ - float64x2_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 1, 0); \ - float64x2_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 1, 0); \ - __ret_894 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_894, __p3_894), __rev0_894, __p1_894); \ - __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 1, 0); \ - __ret_894; \ +#define vcopyq_laneq_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ + float64x2_t __ret_930; \ + float64x2_t __s0_930 = __p0_930; \ + float64x2_t __s2_930 = __p2_930; \ + float64x2_t __rev0_930; __rev0_930 = __builtin_shufflevector(__s0_930, __s0_930, 1, 0); \ + float64x2_t __rev2_930; __rev2_930 = __builtin_shufflevector(__s2_930, __s2_930, 1, 0); \ + __ret_930 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_930, __p3_930), __rev0_930, __p1_930); \ + __ret_930 = __builtin_shufflevector(__ret_930, __ret_930, 1, 0); \ + __ret_930; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p64(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ - poly64x1_t __ret_895; \ - poly64x1_t __s0_895 = __p0_895; \ - poly64x2_t __s2_895 = __p2_895; \ - __ret_895 = vset_lane_p64(vgetq_lane_p64(__s2_895, __p3_895), __s0_895, __p1_895); \ - __ret_895; \ +#define vcopy_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ + poly64x1_t __ret_931; \ + poly64x1_t __s0_931 = __p0_931; \ + poly64x2_t __s2_931 = __p2_931; \ + __ret_931 = vset_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \ + __ret_931; \ }) #else -#define vcopy_laneq_p64(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ - poly64x1_t __ret_896; \ - poly64x1_t __s0_896 = __p0_896; \ - poly64x2_t __s2_896 = __p2_896; \ - poly64x2_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 1, 0); \ - __ret_896 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_896, __p3_896), __s0_896, __p1_896); \ - __ret_896; \ +#define vcopy_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ + poly64x1_t __ret_932; \ + poly64x1_t __s0_932 = __p0_932; \ + poly64x2_t __s2_932 = __p2_932; \ + poly64x2_t __rev2_932; __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \ + __ret_932 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __s0_932, 
__p1_932); \ + __ret_932; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f64(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ - float64x1_t __ret_897; \ - float64x1_t __s0_897 = __p0_897; \ - float64x2_t __s2_897 = __p2_897; \ - __ret_897 = vset_lane_f64(vgetq_lane_f64(__s2_897, __p3_897), __s0_897, __p1_897); \ - __ret_897; \ +#define vcopy_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ + float64x1_t __ret_933; \ + float64x1_t __s0_933 = __p0_933; \ + float64x2_t __s2_933 = __p2_933; \ + __ret_933 = vset_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \ + __ret_933; \ }) #else -#define vcopy_laneq_f64(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ - float64x1_t __ret_898; \ - float64x1_t __s0_898 = __p0_898; \ - float64x2_t __s2_898 = __p2_898; \ - float64x2_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 1, 0); \ - __ret_898 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_898, __p3_898), __s0_898, __p1_898); \ - __ret_898; \ +#define vcopy_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \ + float64x1_t __ret_934; \ + float64x1_t __s0_934 = __p0_934; \ + float64x2_t __s2_934 = __p2_934; \ + float64x2_t __rev2_934; __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \ + __ret_934 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __s0_934, __p1_934); \ + __ret_934; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else -__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68596,13 +69014,13 @@ __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else -__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68614,13 +69032,13 @@ __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else -__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai 
__attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68632,13 +69050,13 @@ __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else -__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68650,13 +69068,13 @@ __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68668,13 +69086,13 @@ __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68686,13 +69104,13 @@ __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); return __ret; } #else -__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); 
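/*
 * The hunks on either side of this point all make the same two mechanical
 * changes to the regenerated intrinsics: each `__ai` (always_inline) wrapper
 * gains `__attribute__((target("neon")))`, so the LLVM 19 header enables the
 * NEON feature per function rather than requiring it for the whole
 * translation unit, and the big-endian (#else) branches keep their existing
 * shape: reverse the lane order of each operand with __builtin_shufflevector,
 * run the little-endian implementation (the `__noswap_` helpers), then
 * reverse the result back so lane indices mean the same thing on both byte
 * orders.
 *
 * A minimal, self-contained sketch of that pattern follows. The `zz_`-prefixed
 * names are hypothetical, for illustration only, and are not part of
 * arm_neon.h; the sketch is Clang-specific (__builtin_shufflevector) and
 * guarded to AArch64, where the "neon" target feature is valid.
 */
#if defined(__aarch64__)
/* 8 lanes of 16-bit unsigned, like uint16x8_t, via the vector extension. */
typedef __UINT16_TYPE__ zz_u16x8 __attribute__((vector_size(16)));

static __inline__ __attribute__((__always_inline__, __nodebug__)) __attribute__((target("neon")))
zz_u16x8 zz_vaddq_u16(zz_u16x8 __a, zz_u16x8 __b) {
#ifdef __LITTLE_ENDIAN__
  return __a + __b; /* lanes already in architectural order */
#else
  /* Reverse lanes in, compute, reverse lanes out -- the same dance as the
     __rev0/__rev1/__noswap_ code in the surrounding hunks. For a purely
     element-wise op like + the reversals cancel; they are load-bearing for
     the lane-indexed intrinsics (vget_lane, vset_lane, vcopy_lane, ...). */
  zz_u16x8 __ra = __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
  zz_u16x8 __rb = __builtin_shufflevector(__b, __b, 7, 6, 5, 4, 3, 2, 1, 0);
  zz_u16x8 __r = __ra + __rb;
  return __builtin_shufflevector(__r, __r, 7, 6, 5, 4, 3, 2, 1, 0);
#endif
}
#endif /* __aarch64__ */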
@@ -68703,13 +69121,13 @@ __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); return __ret; } #else -__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68720,13 +69138,13 @@ __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else -__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68737,13 +69155,13 @@ __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else -__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68754,13 +69172,13 @@ __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else -__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68772,13 +69190,13 @@ __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t 
vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else -__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68790,13 +69208,13 @@ __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else -__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68808,13 +69226,13 @@ __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else -__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68826,13 +69244,13 @@ __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68844,13 +69262,13 @@ __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai 
__attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68862,13 +69280,13 @@ __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); return __ret; } #else -__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68879,13 +69297,13 @@ __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); return __ret; } #else -__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68896,13 +69314,13 @@ __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else -__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68913,13 +69331,13 @@ __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else -__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68929,472 +69347,50 @@ __ai int32x4_t 
vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { } #endif -#define vmulx_lane_f64(__p0_899, __p1_899, __p2_899) __extension__ ({ \ - float64x1_t __ret_899; \ - float64x1_t __s0_899 = __p0_899; \ - float64x1_t __s1_899 = __p1_899; \ - float64_t __x_899 = vget_lane_f64(__s0_899, 0); \ - float64_t __y_899 = vget_lane_f64(__s1_899, __p2_899); \ - float64_t __z_899 = vmulxd_f64(__x_899, __y_899); \ - __ret_899 = vset_lane_f64(__z_899, __s0_899, __p2_899); \ - __ret_899; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f64(__p0_900, __p1_900, __p2_900) __extension__ ({ \ - float64x1_t __ret_900; \ - float64x1_t __s0_900 = __p0_900; \ - float64x2_t __s1_900 = __p1_900; \ - float64_t __x_900 = vget_lane_f64(__s0_900, 0); \ - float64_t __y_900 = vgetq_lane_f64(__s1_900, __p2_900); \ - float64_t __z_900 = vmulxd_f64(__x_900, __y_900); \ - __ret_900 = vset_lane_f64(__z_900, __s0_900, 0); \ - __ret_900; \ -}) -#else -#define vmulx_laneq_f64(__p0_901, __p1_901, __p2_901) __extension__ ({ \ - float64x1_t __ret_901; \ - float64x1_t __s0_901 = __p0_901; \ - float64x2_t __s1_901 = __p1_901; \ - float64x2_t __rev1_901; __rev1_901 = __builtin_shufflevector(__s1_901, __s1_901, 1, 0); \ - float64_t __x_901 = vget_lane_f64(__s0_901, 0); \ - float64_t __y_901 = __noswap_vgetq_lane_f64(__rev1_901, __p2_901); \ - float64_t __z_901 = vmulxd_f64(__x_901, __y_901); \ - __ret_901 = vset_lane_f64(__z_901, __s0_901, 0); \ - __ret_901; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ - float32x4_t __ret_902; \ - float32x4_t __s0_902 = __p0_902; \ - float16x8_t __s1_902 = __p1_902; \ - float16x4_t __s2_902 = __p2_902; \ - __ret_902 = vfmlalq_high_f16(__s0_902, __s1_902, (float16x8_t) {vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902)}); \ - __ret_902; \ -}) -#else -#define vfmlalq_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ - float32x4_t __ret_903; \ - float32x4_t __s0_903 = __p0_903; \ - float16x8_t __s1_903 = __p1_903; \ - float16x4_t __s2_903 = __p2_903; \ - float32x4_t __rev0_903; __rev0_903 = __builtin_shufflevector(__s0_903, __s0_903, 3, 2, 1, 0); \ - float16x8_t __rev1_903; __rev1_903 = __builtin_shufflevector(__s1_903, __s1_903, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_903; __rev2_903 = __builtin_shufflevector(__s2_903, __s2_903, 3, 2, 1, 0); \ - __ret_903 = __noswap_vfmlalq_high_f16(__rev0_903, __rev1_903, (float16x8_t) {__noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903)}); \ - __ret_903 = __builtin_shufflevector(__ret_903, __ret_903, 3, 2, 1, 0); \ - __ret_903; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ - float32x2_t __ret_904; \ - float32x2_t __s0_904 = __p0_904; \ - float16x4_t __s1_904 = __p1_904; \ - float16x4_t __s2_904 = __p2_904; \ - __ret_904 = vfmlal_high_f16(__s0_904, __s1_904, (float16x4_t) {vget_lane_f16(__s2_904, __p3_904), 
vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904)}); \ - __ret_904; \ -}) -#else -#define vfmlal_lane_high_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ - float32x2_t __ret_905; \ - float32x2_t __s0_905 = __p0_905; \ - float16x4_t __s1_905 = __p1_905; \ - float16x4_t __s2_905 = __p2_905; \ - float32x2_t __rev0_905; __rev0_905 = __builtin_shufflevector(__s0_905, __s0_905, 1, 0); \ - float16x4_t __rev1_905; __rev1_905 = __builtin_shufflevector(__s1_905, __s1_905, 3, 2, 1, 0); \ - float16x4_t __rev2_905; __rev2_905 = __builtin_shufflevector(__s2_905, __s2_905, 3, 2, 1, 0); \ - __ret_905 = __noswap_vfmlal_high_f16(__rev0_905, __rev1_905, (float16x4_t) {__noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905)}); \ - __ret_905 = __builtin_shufflevector(__ret_905, __ret_905, 1, 0); \ - __ret_905; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ - float32x4_t __ret_906; \ - float32x4_t __s0_906 = __p0_906; \ - float16x8_t __s1_906 = __p1_906; \ - float16x4_t __s2_906 = __p2_906; \ - __ret_906 = vfmlalq_low_f16(__s0_906, __s1_906, (float16x8_t) {vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906)}); \ - __ret_906; \ -}) -#else -#define vfmlalq_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ - float32x4_t __ret_907; \ - float32x4_t __s0_907 = __p0_907; \ - float16x8_t __s1_907 = __p1_907; \ - float16x4_t __s2_907 = __p2_907; \ - float32x4_t __rev0_907; __rev0_907 = __builtin_shufflevector(__s0_907, __s0_907, 3, 2, 1, 0); \ - float16x8_t __rev1_907; __rev1_907 = __builtin_shufflevector(__s1_907, __s1_907, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_907; __rev2_907 = __builtin_shufflevector(__s2_907, __s2_907, 3, 2, 1, 0); \ - __ret_907 = __noswap_vfmlalq_low_f16(__rev0_907, __rev1_907, (float16x8_t) {__noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907)}); \ - __ret_907 = __builtin_shufflevector(__ret_907, __ret_907, 3, 2, 1, 0); \ - __ret_907; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ - float32x2_t __ret_908; \ - float32x2_t __s0_908 = __p0_908; \ - float16x4_t __s1_908 = __p1_908; \ - float16x4_t __s2_908 = __p2_908; \ - __ret_908 = vfmlal_low_f16(__s0_908, __s1_908, (float16x4_t) {vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908)}); \ - __ret_908; \ -}) -#else -#define vfmlal_lane_low_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ - float32x2_t __ret_909; \ - float32x2_t __s0_909 = __p0_909; \ - float16x4_t __s1_909 = __p1_909; \ - float16x4_t __s2_909 = __p2_909; \ - float32x2_t __rev0_909; __rev0_909 = __builtin_shufflevector(__s0_909, __s0_909, 1, 0); \ - float16x4_t 
__rev1_909; __rev1_909 = __builtin_shufflevector(__s1_909, __s1_909, 3, 2, 1, 0); \ - float16x4_t __rev2_909; __rev2_909 = __builtin_shufflevector(__s2_909, __s2_909, 3, 2, 1, 0); \ - __ret_909 = __noswap_vfmlal_low_f16(__rev0_909, __rev1_909, (float16x4_t) {__noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909)}); \ - __ret_909 = __builtin_shufflevector(__ret_909, __ret_909, 1, 0); \ - __ret_909; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ - float32x4_t __ret_910; \ - float32x4_t __s0_910 = __p0_910; \ - float16x8_t __s1_910 = __p1_910; \ - float16x8_t __s2_910 = __p2_910; \ - __ret_910 = vfmlalq_high_f16(__s0_910, __s1_910, (float16x8_t) {vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910)}); \ - __ret_910; \ -}) -#else -#define vfmlalq_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ - float32x4_t __ret_911; \ - float32x4_t __s0_911 = __p0_911; \ - float16x8_t __s1_911 = __p1_911; \ - float16x8_t __s2_911 = __p2_911; \ - float32x4_t __rev0_911; __rev0_911 = __builtin_shufflevector(__s0_911, __s0_911, 3, 2, 1, 0); \ - float16x8_t __rev1_911; __rev1_911 = __builtin_shufflevector(__s1_911, __s1_911, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_911; __rev2_911 = __builtin_shufflevector(__s2_911, __s2_911, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_911 = __noswap_vfmlalq_high_f16(__rev0_911, __rev1_911, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911)}); \ - __ret_911 = __builtin_shufflevector(__ret_911, __ret_911, 3, 2, 1, 0); \ - __ret_911; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ - float32x2_t __ret_912; \ - float32x2_t __s0_912 = __p0_912; \ - float16x4_t __s1_912 = __p1_912; \ - float16x8_t __s2_912 = __p2_912; \ - __ret_912 = vfmlal_high_f16(__s0_912, __s1_912, (float16x4_t) {vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912)}); \ - __ret_912; \ -}) -#else -#define vfmlal_laneq_high_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ - float32x2_t __ret_913; \ - float32x2_t __s0_913 = __p0_913; \ - float16x4_t __s1_913 = __p1_913; \ - float16x8_t __s2_913 = __p2_913; \ - float32x2_t __rev0_913; __rev0_913 = __builtin_shufflevector(__s0_913, __s0_913, 1, 0); \ - float16x4_t __rev1_913; __rev1_913 = __builtin_shufflevector(__s1_913, __s1_913, 3, 2, 1, 0); \ - float16x8_t __rev2_913; __rev2_913 = __builtin_shufflevector(__s2_913, __s2_913, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_913 = __noswap_vfmlal_high_f16(__rev0_913, __rev1_913, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), 
__noswap_vgetq_lane_f16(__rev2_913, __p3_913)}); \ - __ret_913 = __builtin_shufflevector(__ret_913, __ret_913, 1, 0); \ - __ret_913; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ - float32x4_t __ret_914; \ - float32x4_t __s0_914 = __p0_914; \ - float16x8_t __s1_914 = __p1_914; \ - float16x8_t __s2_914 = __p2_914; \ - __ret_914 = vfmlalq_low_f16(__s0_914, __s1_914, (float16x8_t) {vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914)}); \ - __ret_914; \ -}) -#else -#define vfmlalq_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ - float32x4_t __ret_915; \ - float32x4_t __s0_915 = __p0_915; \ - float16x8_t __s1_915 = __p1_915; \ - float16x8_t __s2_915 = __p2_915; \ - float32x4_t __rev0_915; __rev0_915 = __builtin_shufflevector(__s0_915, __s0_915, 3, 2, 1, 0); \ - float16x8_t __rev1_915; __rev1_915 = __builtin_shufflevector(__s1_915, __s1_915, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_915; __rev2_915 = __builtin_shufflevector(__s2_915, __s2_915, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_915 = __noswap_vfmlalq_low_f16(__rev0_915, __rev1_915, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915)}); \ - __ret_915 = __builtin_shufflevector(__ret_915, __ret_915, 3, 2, 1, 0); \ - __ret_915; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ - float32x2_t __ret_916; \ - float32x2_t __s0_916 = __p0_916; \ - float16x4_t __s1_916 = __p1_916; \ - float16x8_t __s2_916 = __p2_916; \ - __ret_916 = vfmlal_low_f16(__s0_916, __s1_916, (float16x4_t) {vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916)}); \ - __ret_916; \ -}) -#else -#define vfmlal_laneq_low_f16(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \ - float32x2_t __ret_917; \ - float32x2_t __s0_917 = __p0_917; \ - float16x4_t __s1_917 = __p1_917; \ - float16x8_t __s2_917 = __p2_917; \ - float32x2_t __rev0_917; __rev0_917 = __builtin_shufflevector(__s0_917, __s0_917, 1, 0); \ - float16x4_t __rev1_917; __rev1_917 = __builtin_shufflevector(__s1_917, __s1_917, 3, 2, 1, 0); \ - float16x8_t __rev2_917; __rev2_917 = __builtin_shufflevector(__s2_917, __s2_917, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_917 = __noswap_vfmlal_low_f16(__rev0_917, __rev1_917, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917)}); \ - __ret_917 = __builtin_shufflevector(__ret_917, __ret_917, 1, 0); \ - __ret_917; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_lane_high_f16(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \ - float32x4_t __ret_918; \ - float32x4_t __s0_918 = __p0_918; \ - float16x8_t __s1_918 = __p1_918; \ - float16x4_t __s2_918 = __p2_918; \ - __ret_918 = 
vfmlslq_high_f16(__s0_918, __s1_918, (float16x8_t) {vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918)}); \ - __ret_918; \ -}) -#else -#define vfmlslq_lane_high_f16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \ - float32x4_t __ret_919; \ - float32x4_t __s0_919 = __p0_919; \ - float16x8_t __s1_919 = __p1_919; \ - float16x4_t __s2_919 = __p2_919; \ - float32x4_t __rev0_919; __rev0_919 = __builtin_shufflevector(__s0_919, __s0_919, 3, 2, 1, 0); \ - float16x8_t __rev1_919; __rev1_919 = __builtin_shufflevector(__s1_919, __s1_919, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_919; __rev2_919 = __builtin_shufflevector(__s2_919, __s2_919, 3, 2, 1, 0); \ - __ret_919 = __noswap_vfmlslq_high_f16(__rev0_919, __rev1_919, (float16x8_t) {__noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919)}); \ - __ret_919 = __builtin_shufflevector(__ret_919, __ret_919, 3, 2, 1, 0); \ - __ret_919; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_lane_high_f16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \ - float32x2_t __ret_920; \ - float32x2_t __s0_920 = __p0_920; \ - float16x4_t __s1_920 = __p1_920; \ - float16x4_t __s2_920 = __p2_920; \ - __ret_920 = vfmlsl_high_f16(__s0_920, __s1_920, (float16x4_t) {vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920)}); \ - __ret_920; \ -}) -#else -#define vfmlsl_lane_high_f16(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ - float32x2_t __ret_921; \ - float32x2_t __s0_921 = __p0_921; \ - float16x4_t __s1_921 = __p1_921; \ - float16x4_t __s2_921 = __p2_921; \ - float32x2_t __rev0_921; __rev0_921 = __builtin_shufflevector(__s0_921, __s0_921, 1, 0); \ - float16x4_t __rev1_921; __rev1_921 = __builtin_shufflevector(__s1_921, __s1_921, 3, 2, 1, 0); \ - float16x4_t __rev2_921; __rev2_921 = __builtin_shufflevector(__s2_921, __s2_921, 3, 2, 1, 0); \ - __ret_921 = __noswap_vfmlsl_high_f16(__rev0_921, __rev1_921, (float16x4_t) {__noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921)}); \ - __ret_921 = __builtin_shufflevector(__ret_921, __ret_921, 1, 0); \ - __ret_921; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_lane_low_f16(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ - float32x4_t __ret_922; \ - float32x4_t __s0_922 = __p0_922; \ - float16x8_t __s1_922 = __p1_922; \ - float16x4_t __s2_922 = __p2_922; \ - __ret_922 = vfmlslq_low_f16(__s0_922, __s1_922, (float16x8_t) {vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922)}); \ - __ret_922; \ -}) -#else -#define vfmlslq_lane_low_f16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ - float32x4_t 
__ret_923; \
- float32x4_t __s0_923 = __p0_923; \
- float16x8_t __s1_923 = __p1_923; \
- float16x4_t __s2_923 = __p2_923; \
- float32x4_t __rev0_923; __rev0_923 = __builtin_shufflevector(__s0_923, __s0_923, 3, 2, 1, 0); \
- float16x8_t __rev1_923; __rev1_923 = __builtin_shufflevector(__s1_923, __s1_923, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __rev2_923; __rev2_923 = __builtin_shufflevector(__s2_923, __s2_923, 3, 2, 1, 0); \
- __ret_923 = __noswap_vfmlslq_low_f16(__rev0_923, __rev1_923, (float16x8_t) {__noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923)}); \
- __ret_923 = __builtin_shufflevector(__ret_923, __ret_923, 3, 2, 1, 0); \
- __ret_923; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \
- float32x2_t __ret_924; \
- float32x2_t __s0_924 = __p0_924; \
- float16x4_t __s1_924 = __p1_924; \
- float16x4_t __s2_924 = __p2_924; \
- __ret_924 = vfmlsl_low_f16(__s0_924, __s1_924, (float16x4_t) {vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924)}); \
- __ret_924; \
-})
-#else
-#define vfmlsl_lane_low_f16(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \
- float32x2_t __ret_925; \
- float32x2_t __s0_925 = __p0_925; \
- float16x4_t __s1_925 = __p1_925; \
- float16x4_t __s2_925 = __p2_925; \
- float32x2_t __rev0_925; __rev0_925 = __builtin_shufflevector(__s0_925, __s0_925, 1, 0); \
- float16x4_t __rev1_925; __rev1_925 = __builtin_shufflevector(__s1_925, __s1_925, 3, 2, 1, 0); \
- float16x4_t __rev2_925; __rev2_925 = __builtin_shufflevector(__s2_925, __s2_925, 3, 2, 1, 0); \
- __ret_925 = __noswap_vfmlsl_low_f16(__rev0_925, __rev1_925, (float16x4_t) {__noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925)}); \
- __ret_925 = __builtin_shufflevector(__ret_925, __ret_925, 1, 0); \
- __ret_925; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \
- float32x4_t __ret_926; \
- float32x4_t __s0_926 = __p0_926; \
- float16x8_t __s1_926 = __p1_926; \
- float16x8_t __s2_926 = __p2_926; \
- __ret_926 = vfmlslq_high_f16(__s0_926, __s1_926, (float16x8_t) {vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926)}); \
- __ret_926; \
-})
-#else
-#define vfmlslq_laneq_high_f16(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \
- float32x4_t __ret_927; \
- float32x4_t __s0_927 = __p0_927; \
- float16x8_t __s1_927 = __p1_927; \
- float16x8_t __s2_927 = __p2_927; \
- float32x4_t __rev0_927; __rev0_927 = __builtin_shufflevector(__s0_927, __s0_927, 3, 2, 1, 0); \
- float16x8_t __rev1_927; __rev1_927 = __builtin_shufflevector(__s1_927, __s1_927, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev2_927; __rev2_927 = __builtin_shufflevector(__s2_927, __s2_927, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_927 = __noswap_vfmlslq_high_f16(__rev0_927, __rev1_927, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927)}); \
- __ret_927 = __builtin_shufflevector(__ret_927, __ret_927, 3, 2, 1, 0); \
- __ret_927; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \
- float32x2_t __ret_928; \
- float32x2_t __s0_928 = __p0_928; \
- float16x4_t __s1_928 = __p1_928; \
- float16x8_t __s2_928 = __p2_928; \
- __ret_928 = vfmlsl_high_f16(__s0_928, __s1_928, (float16x4_t) {vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928)}); \
- __ret_928; \
-})
-#else
-#define vfmlsl_laneq_high_f16(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \
- float32x2_t __ret_929; \
- float32x2_t __s0_929 = __p0_929; \
- float16x4_t __s1_929 = __p1_929; \
- float16x8_t __s2_929 = __p2_929; \
- float32x2_t __rev0_929; __rev0_929 = __builtin_shufflevector(__s0_929, __s0_929, 1, 0); \
- float16x4_t __rev1_929; __rev1_929 = __builtin_shufflevector(__s1_929, __s1_929, 3, 2, 1, 0); \
- float16x8_t __rev2_929; __rev2_929 = __builtin_shufflevector(__s2_929, __s2_929, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_929 = __noswap_vfmlsl_high_f16(__rev0_929, __rev1_929, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929)}); \
- __ret_929 = __builtin_shufflevector(__ret_929, __ret_929, 1, 0); \
- __ret_929; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \
- float32x4_t __ret_930; \
- float32x4_t __s0_930 = __p0_930; \
- float16x8_t __s1_930 = __p1_930; \
- float16x8_t __s2_930 = __p2_930; \
- __ret_930 = vfmlslq_low_f16(__s0_930, __s1_930, (float16x8_t) {vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930)}); \
- __ret_930; \
-})
-#else
-#define vfmlslq_laneq_low_f16(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \
- float32x4_t __ret_931; \
- float32x4_t __s0_931 = __p0_931; \
- float16x8_t __s1_931 = __p1_931; \
- float16x8_t __s2_931 = __p2_931; \
- float32x4_t __rev0_931; __rev0_931 = __builtin_shufflevector(__s0_931, __s0_931, 3, 2, 1, 0); \
- float16x8_t __rev1_931; __rev1_931 = __builtin_shufflevector(__s1_931, __s1_931, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev2_931; __rev2_931 = __builtin_shufflevector(__s2_931, __s2_931, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_931 = __noswap_vfmlslq_low_f16(__rev0_931, __rev1_931, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931)}); \
- __ret_931 = __builtin_shufflevector(__ret_931, __ret_931, 3, 2, 1, 0); \
- __ret_931; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \
- float32x2_t __ret_932; \
- float32x2_t __s0_932 = __p0_932; \
- float16x4_t __s1_932 = __p1_932; \
- float16x8_t __s2_932 = __p2_932; \
- __ret_932 = vfmlsl_low_f16(__s0_932, __s1_932, (float16x4_t) {vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932)}); \
- __ret_932; \
-})
-#else
-#define vfmlsl_laneq_low_f16(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \
- float32x2_t __ret_933; \
- float32x2_t __s0_933 = __p0_933; \
- float16x4_t __s1_933 = __p1_933; \
- float16x8_t __s2_933 = __p2_933; \
- float32x2_t __rev0_933; __rev0_933 = __builtin_shufflevector(__s0_933, __s0_933, 1, 0); \
- float16x4_t __rev1_933; __rev1_933 = __builtin_shufflevector(__s1_933, __s1_933, 3, 2, 1, 0); \
- float16x8_t __rev2_933; __rev2_933 = __builtin_shufflevector(__s2_933, __s2_933, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_933 = __noswap_vfmlsl_low_f16(__rev0_933, __rev1_933, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933)}); \
- __ret_933 = __builtin_shufflevector(__ret_933, __ret_933, 1, 0); \
- __ret_933; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_934, __p1_934, __p2_934) __extension__ ({ \
- float16_t __ret_934; \
- float16_t __s0_934 = __p0_934; \
- float16x4_t __s1_934 = __p1_934; \
- __ret_934 = __s0_934 * vget_lane_f16(__s1_934, __p2_934); \
- __ret_934; \
-})
-#else
-#define vmulh_lane_f16(__p0_935, __p1_935, __p2_935) __extension__ ({ \
- float16_t __ret_935; \
- float16_t __s0_935 = __p0_935; \
- float16x4_t __s1_935 = __p1_935; \
- float16x4_t __rev1_935; __rev1_935 = __builtin_shufflevector(__s1_935, __s1_935, 3, 2, 1, 0); \
- __ret_935 = __s0_935 * __noswap_vget_lane_f16(__rev1_935, __p2_935); \
+#define vmulx_lane_f64(__p0_935, __p1_935, __p2_935) __extension__ ({ \
+ float64x1_t __ret_935; \
+ float64x1_t __s0_935 = __p0_935; \
+ float64x1_t __s1_935 = __p1_935; \
+ float64_t __x_935 = vget_lane_f64(__s0_935, 0); \
+ float64_t __y_935 = vget_lane_f64(__s1_935, __p2_935); \
+ float64_t __z_935 = vmulxd_f64(__x_935, __y_935); \
+ __ret_935 = vset_lane_f64(__z_935, __s0_935, __p2_935); \
 __ret_935; \
 })
-#endif
-
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_936, __p1_936, __p2_936) __extension__ ({ \
- float16_t __ret_936; \
- float16_t __s0_936 = __p0_936; \
- float16x8_t __s1_936 = __p1_936; \
- __ret_936 = __s0_936 * vgetq_lane_f16(__s1_936, __p2_936); \
+#define vmulx_laneq_f64(__p0_936, __p1_936, __p2_936) __extension__ ({ \
+ float64x1_t __ret_936; \
+ float64x1_t __s0_936 = __p0_936; \
+ float64x2_t __s1_936 = __p1_936; \
+ float64_t __x_936 = vget_lane_f64(__s0_936, 0); \
+ float64_t __y_936 = vgetq_lane_f64(__s1_936, __p2_936); \
+ float64_t __z_936 = vmulxd_f64(__x_936, __y_936); \
+ __ret_936 = vset_lane_f64(__z_936, __s0_936, 0); \
 __ret_936; \
 })
 #else
-#define vmulh_laneq_f16(__p0_937, __p1_937, __p2_937) __extension__ ({ \
- float16_t __ret_937; \
- float16_t __s0_937 = __p0_937; \
- float16x8_t __s1_937 = __p1_937; \
- float16x8_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_937 = __s0_937 * __noswap_vgetq_lane_f16(__rev1_937, __p2_937); \
+#define vmulx_laneq_f64(__p0_937, __p1_937, __p2_937) __extension__ ({ \
+ float64x1_t __ret_937; \
+ float64x1_t __s0_937 = __p0_937; \
+ float64x2_t __s1_937 = __p1_937; \
+ float64x2_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 1, 0); \
+ float64_t __x_937 = vget_lane_f64(__s0_937, 0); \
+ float64_t __y_937 = __noswap_vgetq_lane_f64(__rev1_937, __p2_937); \
+ float64_t __z_937 = vmulxd_f64(__x_937, __y_937); \
+ __ret_937 = vset_lane_f64(__z_937, __s0_937, 0); \
 __ret_937; \
 })
 #endif
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
 uint16x8_t __ret;
 __ret = __p0 + vabdl_u8(__p1, __p2);
 return __ret;
 }
 #else
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
 uint16x8_t __ret;
 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -69403,7 +69399,7 @@ __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
 return __ret;
 }
-__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
 uint16x8_t __ret;
 __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
 return __ret;
@@ -69411,13 +69407,13 @@ __ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
 uint64x2_t __ret;
 __ret = __p0 + vabdl_u32(__p1, __p2);
 return __ret;
 }
 #else
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
 uint64x2_t __ret;
 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -69426,7 +69422,7 @@ __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
 return __ret;
 }
-__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
 uint64x2_t __ret;
 __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
 return __ret;
@@ -69434,13 +69430,13 @@ __ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
 uint32x4_t __ret;
 __ret = __p0 + vabdl_u16(__p1, __p2);
 return __ret;
 }
 #else
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
 uint32x4_t __ret;
 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -69449,7 +69445,7 @@ __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
 return __ret;
 }
-__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
 uint32x4_t __ret;
 __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
 return __ret;
@@ -69457,13 +69453,13 @@ __ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
 int16x8_t __ret;
 __ret = __p0 + vabdl_s8(__p1, __p2);
 return __ret;
 }
 #else
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
 int16x8_t __ret;
 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -69472,7 +69468,7 @@ __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
 return __ret;
 }
-__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+__ai __attribute__((target("neon"))) int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
 int16x8_t __ret;
 __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
 return __ret;
@@ -69480,13 +69476,13 @@ __ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 int64x2_t __ret;
 __ret = __p0 + vabdl_s32(__p1, __p2);
 return __ret;
 }
 #else
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 int64x2_t __ret;
 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
@@ -69495,7 +69491,7 @@ __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
 return __ret;
 }
-__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+__ai __attribute__((target("neon"))) int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
 int64x2_t __ret;
 __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
 return __ret;
@@ -69503,13 +69499,13 @@ __ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
 int32x4_t __ret;
 __ret = __p0 + vabdl_s16(__p1, __p2);
 return __ret;
 }
 #else
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
 int32x4_t __ret;
 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -69518,22 +69514,22 @@ __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
 return __ret;
 }
-__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+__ai __attribute__((target("neon"))) int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
 int32x4_t __ret;
 __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
 return __ret;
 }
 #endif

-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(__arm64ec__)
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
 uint16x8_t __ret;
 __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
 return __ret;
 }
 #else
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
 uint16x8_t __ret;
 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -69545,13 +69541,13 @@ __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2)
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
 uint64x2_t __ret;
 __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
 return __ret;
 }
 #else
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
 uint64x2_t __ret;
 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -69563,13 +69559,13 @@ __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
 uint32x4_t __ret;
 __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
 return __ret;
 }
 #else
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
 uint32x4_t __ret;
 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -69581,13 +69577,13 @@ __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
 int16x8_t __ret;
 __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
 return __ret;
 }
 #else
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
+__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
 int16x8_t __ret;
 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -69599,13 +69595,13 @@ __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
 int64x2_t __ret;
 __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
 return __ret;
 }
 #else
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
+__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
 int64x2_t __ret;
 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
@@ -69617,13 +69613,13 @@ __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
 #endif

 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
 int32x4_t __ret;
 __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
 return __ret;
 }
 #else
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
+__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
 int32x4_t __ret;
 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
@@ -69640,4 +69636,3 @@ __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
 #endif /* if !defined(__ARM_NEON) */
 #endif /* ifndef __ARM_FP */
-#endif /* __ARM_NEON_H */
diff --git a/lib/include/arm_sme.h b/lib/include/arm_sme.h
index 2ed316f260..cbfea38fe4 100644
--- a/lib/include/arm_sme.h
+++ b/lib/include/arm_sme.h
@@ -16,6 +16,8 @@
 #endif

 #include <arm_sve.h>
+#include <stddef.h>
+

 /* Function attributes */
 #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
@@ -39,6 +41,11 @@ __ai bool __arm_in_streaming_mode(void) __arm_streaming_compatible {
 return x0 & 1;
 }

+void *__arm_sc_memcpy(void *dest, const void *src, size_t n) __arm_streaming_compatible;
+void *__arm_sc_memmove(void *dest, const void *src, size_t n) __arm_streaming_compatible;
+void *__arm_sc_memset(void *s, int c, size_t n) __arm_streaming_compatible;
+void *__arm_sc_memchr(void *s, int c, size_t n) __arm_streaming_compatible;
+
 __ai __attribute__((target("sme"))) void svundef_za(void) __arm_streaming_compatible __arm_out("za") { }
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m)))
@@ -368,7 +375,7 @@ void svwrite_ver_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za)))
 void svzero_mask_za(uint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za)))
-void svzero_za();
+void svzero_za(void);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m)))
 void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m)))
@@ -597,6 +604,78 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_
 void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m)))
 void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x2)))
+void svmla_single_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x4)))
+void svmla_single_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x2)))
+void svmla_lane_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x4)))
+void svmla_lane_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x2)))
+void svmla_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x4)))
+void svmla_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x2)))
+void svmls_single_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x4)))
+void svmls_single_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x2)))
+void svmls_lane_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x4)))
+void svmls_lane_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x2)))
+void svmls_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x4)))
+void svmls_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_f16_m)))
+void svmopa_za16_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_f16_m)))
+void svmops_za16_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x2)))
+void svmla_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x4)))
+void svmla_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x2)))
+void svmla_lane_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x4)))
+void svmla_lane_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x2)))
+void svmla_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x4)))
+void svmla_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x2)))
+void svmls_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x4)))
+void svmls_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x2)))
+void svmls_lane_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x4)))
+void svmls_lane_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x2)))
+void svmls_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x4)))
+void svmls_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_f16_m)))
+void svmopa_za16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_f16_m)))
+void svmops_za16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2)))
+void svadd_za16_f16_vg1x2(uint32_t, svfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4)))
+void svadd_za16_f16_vg1x4(uint32_t, svfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x2)))
+void svsub_za16_f16_vg1x2(uint32_t, svfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x4)))
+void svsub_za16_f16_vg1x4(uint32_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2)))
+void svadd_za16_vg1x2(uint32_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4)))
+void svadd_za16_vg1x4(uint32_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x2)))
+void svsub_za16_vg1x2(uint32_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x4)))
+void svsub_za16_vg1x4(uint32_t, svfloat16x4_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m)))
 void svmopa_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m)))
@@ -2059,6 +2138,78 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x
 void svwrite_za8_vg1x4(uint32_t, svuint8x4_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4)))
 void svwrite_za8_vg1x4(uint32_t, svint8x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x2)))
+void svadd_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x4)))
+void svadd_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x2)))
+void svmla_single_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x4)))
+void svmla_single_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x2)))
+void svmla_lane_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x4)))
+void svmla_lane_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x2)))
+void svmla_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x4)))
+void svmla_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x2)))
+void svmls_single_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x4)))
+void svmls_single_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x2)))
+void svmls_lane_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x4)))
+void svmls_lane_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x2)))
+void svmls_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x4)))
+void svmls_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_bf16_m)))
+void svmopa_za16_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_bf16_m)))
+void svmops_za16_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x2)))
+void svsub_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x4)))
+void svsub_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x2)))
+void svadd_za16_vg1x2(uint32_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x4)))
+void svadd_za16_vg1x4(uint32_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x2)))
+void svmla_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x4)))
+void svmla_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x2)))
+void svmla_lane_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x4)))
+void svmla_lane_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x2)))
+void svmla_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x4)))
+void svmla_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x2)))
+void svmls_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x4)))
+void svmls_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x2)))
+void svmls_lane_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x4)))
+void svmls_lane_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x2)))
+void svmls_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x4)))
+void svmls_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_bf16_m)))
+void svmopa_za16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_bf16_m)))
+void svmops_za16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x2)))
+void svsub_za16_vg1x2(uint32_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x4)))
+void svsub_za16_vg1x4(uint32_t, svbfloat16x4_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x2)))
 void svadd_za64_f64_vg1x2(uint32_t, svfloat64x2_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x4)))
@@ -2403,6 +2554,262 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_s1
 void svvdot_lane_za64_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_u16_vg1x4)))
 void svvdot_lane_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u8)))
+svuint8_t svreadz_hor_za128_u8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u32)))
+svuint32_t svreadz_hor_za128_u32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u64)))
+svuint64_t svreadz_hor_za128_u64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u16)))
+svuint16_t svreadz_hor_za128_u16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_bf16)))
+svbfloat16_t svreadz_hor_za128_bf16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s8)))
+svint8_t svreadz_hor_za128_s8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f64)))
+svfloat64_t svreadz_hor_za128_f64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f32)))
+svfloat32_t svreadz_hor_za128_f32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f16)))
+svfloat16_t svreadz_hor_za128_f16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s32)))
+svint32_t svreadz_hor_za128_s32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s64)))
+svint64_t svreadz_hor_za128_s64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s16)))
+svint16_t svreadz_hor_za128_s16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16)))
+svuint16_t svreadz_hor_za16_u16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16)))
+svbfloat16_t svreadz_hor_za16_bf16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16)))
+svfloat16_t svreadz_hor_za16_f16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16)))
+svint16_t svreadz_hor_za16_s16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16_vg2)))
+svuint16x2_t svreadz_hor_za16_u16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16_vg2)))
+svbfloat16x2_t svreadz_hor_za16_bf16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16_vg2)))
+svfloat16x2_t svreadz_hor_za16_f16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16_vg2)))
+svint16x2_t svreadz_hor_za16_s16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16_vg4)))
+svuint16x4_t svreadz_hor_za16_u16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16_vg4)))
+svbfloat16x4_t svreadz_hor_za16_bf16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16_vg4)))
+svfloat16x4_t svreadz_hor_za16_f16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16_vg4)))
+svint16x4_t svreadz_hor_za16_s16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32)))
+svuint32_t svreadz_hor_za32_u32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32)))
+svfloat32_t svreadz_hor_za32_f32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32)))
+svint32_t svreadz_hor_za32_s32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32_vg2)))
+svuint32x2_t svreadz_hor_za32_u32_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32_vg2)))
+svfloat32x2_t svreadz_hor_za32_f32_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32_vg2)))
+svint32x2_t svreadz_hor_za32_s32_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32_vg4)))
+svuint32x4_t svreadz_hor_za32_u32_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32_vg4)))
+svfloat32x4_t svreadz_hor_za32_f32_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32_vg4)))
+svint32x4_t svreadz_hor_za32_s32_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64)))
+svuint64_t svreadz_hor_za64_u64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64)))
+svfloat64_t svreadz_hor_za64_f64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64)))
+svint64_t svreadz_hor_za64_s64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64_vg2)))
+svuint64x2_t svreadz_hor_za64_u64_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64_vg2)))
+svfloat64x2_t svreadz_hor_za64_f64_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64_vg2)))
+svint64x2_t svreadz_hor_za64_s64_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64_vg4)))
+svuint64x4_t svreadz_hor_za64_u64_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64_vg4)))
+svfloat64x4_t svreadz_hor_za64_f64_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64_vg4)))
+svint64x4_t svreadz_hor_za64_s64_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8)))
+svuint8_t svreadz_hor_za8_u8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8)))
+svint8_t svreadz_hor_za8_s8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg2)))
+svuint8x2_t svreadz_hor_za8_u8_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg2)))
+svint8x2_t svreadz_hor_za8_s8_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg4)))
+svuint8x4_t svreadz_hor_za8_u8_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg4)))
+svint8x4_t svreadz_hor_za8_s8_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u8)))
+svuint8_t svreadz_ver_za128_u8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u32)))
+svuint32_t svreadz_ver_za128_u32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u64)))
+svuint64_t svreadz_ver_za128_u64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u16)))
+svuint16_t svreadz_ver_za128_u16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_bf16)))
+svbfloat16_t svreadz_ver_za128_bf16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s8)))
+svint8_t svreadz_ver_za128_s8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f64)))
+svfloat64_t svreadz_ver_za128_f64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f32)))
+svfloat32_t svreadz_ver_za128_f32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f16)))
+svfloat16_t svreadz_ver_za128_f16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s32)))
+svint32_t svreadz_ver_za128_s32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s64)))
+svint64_t svreadz_ver_za128_s64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s16)))
+svint16_t svreadz_ver_za128_s16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16)))
+svuint16_t svreadz_ver_za16_u16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16)))
+svbfloat16_t svreadz_ver_za16_bf16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16)))
+svfloat16_t svreadz_ver_za16_f16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16)))
+svint16_t svreadz_ver_za16_s16(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16_vg2)))
+svuint16x2_t svreadz_ver_za16_u16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16_vg2)))
+svbfloat16x2_t svreadz_ver_za16_bf16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16_vg2)))
+svfloat16x2_t svreadz_ver_za16_f16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16_vg2)))
+svint16x2_t svreadz_ver_za16_s16_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16_vg4)))
+svuint16x4_t svreadz_ver_za16_u16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16_vg4)))
+svbfloat16x4_t svreadz_ver_za16_bf16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16_vg4)))
+svfloat16x4_t svreadz_ver_za16_f16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16_vg4)))
+svint16x4_t svreadz_ver_za16_s16_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32)))
+svuint32_t svreadz_ver_za32_u32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32)))
+svfloat32_t svreadz_ver_za32_f32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32)))
+svint32_t svreadz_ver_za32_s32(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32_vg2)))
+svuint32x2_t svreadz_ver_za32_u32_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32_vg2)))
+svfloat32x2_t svreadz_ver_za32_f32_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32_vg2)))
+svint32x2_t svreadz_ver_za32_s32_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32_vg4)))
+svuint32x4_t svreadz_ver_za32_u32_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32_vg4)))
+svfloat32x4_t svreadz_ver_za32_f32_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32_vg4)))
+svint32x4_t svreadz_ver_za32_s32_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64)))
+svuint64_t svreadz_ver_za64_u64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64)))
+svfloat64_t svreadz_ver_za64_f64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64)))
+svint64_t svreadz_ver_za64_s64(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64_vg2)))
+svuint64x2_t svreadz_ver_za64_u64_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64_vg2)))
+svfloat64x2_t svreadz_ver_za64_f64_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64_vg2)))
+svint64x2_t svreadz_ver_za64_s64_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64_vg4)))
+svuint64x4_t svreadz_ver_za64_u64_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64_vg4)))
+svfloat64x4_t svreadz_ver_za64_f64_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64_vg4)))
+svint64x4_t svreadz_ver_za64_s64_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8)))
+svuint8_t svreadz_ver_za8_u8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8)))
+svint8_t svreadz_ver_za8_s8(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg2)))
+svuint8x2_t svreadz_ver_za8_u8_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg2)))
+svint8x2_t svreadz_ver_za8_s8_vg2(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg4)))
+svuint8x4_t svreadz_ver_za8_u8_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg4)))
+svint8x4_t svreadz_ver_za8_s8_vg4(uint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x2)))
+svuint16x2_t svreadz_za16_u16_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x2)))
+svbfloat16x2_t svreadz_za16_bf16_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_f16_vg1x2)))
+svfloat16x2_t svreadz_za16_f16_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_s16_vg1x2)))
+svint16x2_t svreadz_za16_s16_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x4)))
+svuint16x4_t svreadz_za16_u16_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x4)))
+svbfloat16x4_t svreadz_za16_bf16_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_f16_vg1x4)))
+svfloat16x4_t svreadz_za16_f16_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_s16_vg1x4)))
+svint16x4_t svreadz_za16_s16_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_u32_vg1x2)))
+svuint32x2_t svreadz_za32_u32_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_f32_vg1x2)))
+svfloat32x2_t svreadz_za32_f32_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_s32_vg1x2)))
+svint32x2_t svreadz_za32_s32_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_u32_vg1x4)))
+svuint32x4_t svreadz_za32_u32_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_f32_vg1x4)))
+svfloat32x4_t svreadz_za32_f32_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_s32_vg1x4)))
+svint32x4_t svreadz_za32_s32_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_u64_vg1x2)))
+svuint64x2_t svreadz_za64_u64_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_f64_vg1x2)))
+svfloat64x2_t svreadz_za64_f64_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_s64_vg1x2)))
+svint64x2_t svreadz_za64_s64_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_u64_vg1x4)))
+svuint64x4_t svreadz_za64_u64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_f64_vg1x4)))
+svfloat64x4_t svreadz_za64_f64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_s64_vg1x4)))
+svint64x4_t svreadz_za64_s64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x2)))
+svuint8x2_t svreadz_za8_u8_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x2)))
+svint8x2_t svreadz_za8_s8_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x4)))
+svuint8x4_t svreadz_za8_u8_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x4)))
+svint8x4_t svreadz_za8_s8_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x2)))
+void svzero_za64_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x4)))
+void svzero_za64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x1)))
+void svzero_za64_vg2x1(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x2)))
+void svzero_za64_vg2x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x4)))
+void svzero_za64_vg2x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x1)))
+void svzero_za64_vg4x1(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x2)))
+void svzero_za64_vg4x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x4)))
+void svzero_za64_vg4x4(uint32_t);
 #ifdef __cplusplus
 } // extern "C"
 #endif
diff --git a/lib/include/arm_sve.h b/lib/include/arm_sve.h
index 3990f803c5..87691e03ce 100644
--- a/lib/include/arm_sve.h
+++ b/lib/include/arm_sve.h
@@ -124,3118 +124,2318 @@ enum svprfop
 #define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__))

-#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__)
-#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__)
-#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__)
-#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__)
-#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__)
-#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__)
-#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__)
-#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__)
-#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__)
-#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__)
-#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__)
-#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__)
-#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__)
-#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__)
-#define svreinterpret_u8_s16(...) __builtin_sve_reinterpret_u8_s16(__VA_ARGS__)
-#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__)
-#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__)
-#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__)
-#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__)
-#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__)
-#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__)
-#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__)
-#define svreinterpret_u8_f32(...) __builtin_sve_reinterpret_u8_f32(__VA_ARGS__)
-#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__)
-#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__)
-#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__)
-#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__)
-#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__)
-#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__)
-#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__)
-#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__)
-#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__)
-#define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__)
-#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__)
-#define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__)
-#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__)
-#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__)
-#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__)
-#define svreinterpret_u16_s16(...) __builtin_sve_reinterpret_u16_s16(__VA_ARGS__)
-#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__)
-#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__)
-#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__)
-#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__)
-#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__)
-#define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__)
-#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__)
-#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__)
-#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__)
-#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__)
-#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__)
-#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__)
-#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__)
-#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__)
-#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__)
-#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__)
-#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__)
-#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__)
-#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__)
-#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__)
-#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__)
-#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__)
-#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__)
-#define svreinterpret_u32_s16(...) __builtin_sve_reinterpret_u32_s16(__VA_ARGS__)
-#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__)
-#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__)
-#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__)
-#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__)
-#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__)
-#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__)
-#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__)
-#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__)
-#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__)
-#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__)
-#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__)
-#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__)
-#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__)
-#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__)
-#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__)
-#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__)
-#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__)
-#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__)
-#define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__)
-#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__)
-#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__)
-#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__)
-#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__)
-#define svreinterpret_u64_s16(...) __builtin_sve_reinterpret_u64_s16(__VA_ARGS__)
-#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__)
-#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__)
-#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__)
-#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__)
-#define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__)
-#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__)
-#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__)
-#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__)
-#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__)
-#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__)
-#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__)
-#define svreinterpret_f16_s16(...) __builtin_sve_reinterpret_f16_s16(__VA_ARGS__)
-#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__)
-#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__)
-#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__)
-#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__)
-#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__)
-#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__)
-#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__)
-#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__)
-#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__)
-#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__)
-#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__)
-#define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__)
-#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__)
-#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__)
-#define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__)
-#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__)
-#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__)
-#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__)
-#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__)
-#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__)
-#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__)
-#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__)
-#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__)
-#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__)
-#define svreinterpret_f32_u16(...) __builtin_sve_reinterpret_f32_u16(__VA_ARGS__)
-#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__)
-#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__)
-#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__)
-#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__)
-#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__)
-#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__)
-#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__)
-#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__)
-#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__)
-#define svreinterpret_f64_u8(...) __builtin_sve_reinterpret_f64_u8(__VA_ARGS__)
-#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__)
-#define svreinterpret_f64_u16(...) __builtin_sve_reinterpret_f64_u16(__VA_ARGS__)
-#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__)
-#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__)
-#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__)
-#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__)
-#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__)
-#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__)
-#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__)
-#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__)
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_s8(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_u8(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_s16(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_u16(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_s32(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_u32(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_s64(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_u64(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_f16(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svbfloat16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_bf16(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_f32(op);
-}
-
-__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s8_f64(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_s8(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_u8(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_s16(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_u16(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_s32(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_u32(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_s64(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_u64(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_f16(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svbfloat16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_bf16(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_f32(op);
-}
-
-__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u8_f64(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_s8(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_u8(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_s16(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_u16(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_s32(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_u32(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_s64(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_u64(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_f16(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svbfloat16_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_bf16(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat32_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_f32(op);
-}
-
-__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat64_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_s16_f64(op);
-}
-
-__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u16_s8(op);
-}
-
-__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint8_t op) __arm_streaming_compatible {
- return __builtin_sve_reinterpret_u16_u8(op);
-}
-
-__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint16_t op)
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s32(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u32(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s64(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u64(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f32(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f64(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s8(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u8(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s32(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u32(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s64(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u64(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_bf16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f32(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f64(op); -} - -__aio __attribute__((target("sve"))) 
svuint32_t svreinterpret_u32(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s8(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u8(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s32(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u32(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s64(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u64(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f32(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f64(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s8(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u8(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s32(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u32(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s64(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u64(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_bf16(op); -} - 
-__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f32(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f64(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s8(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u8(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s32(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u32(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s64(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u64(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f32(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f64(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s8(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u8(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s32(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u32(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s64(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint64_t op) __arm_streaming_compatible { - 
return __builtin_sve_reinterpret_f16_u64(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_bf16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f32(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f64(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s8(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u8(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s32(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u32(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s64(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u64(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_bf16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f32(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f64(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s8(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u8(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s32(op); -} - 
-__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u32(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s64(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u64(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_bf16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f32(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f64(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s8(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u8(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s32(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u32(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s64(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u64(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_bf16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f32(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f64(op); -} - -#define svreinterpret_s8_s8_x2(...) __builtin_sve_reinterpret_s8_s8_x2(__VA_ARGS__) -#define svreinterpret_s8_u8_x2(...) __builtin_sve_reinterpret_s8_u8_x2(__VA_ARGS__) -#define svreinterpret_s8_s16_x2(...) __builtin_sve_reinterpret_s8_s16_x2(__VA_ARGS__) -#define svreinterpret_s8_u16_x2(...) __builtin_sve_reinterpret_s8_u16_x2(__VA_ARGS__) -#define svreinterpret_s8_s32_x2(...) 
__builtin_sve_reinterpret_s8_s32_x2(__VA_ARGS__) -#define svreinterpret_s8_u32_x2(...) __builtin_sve_reinterpret_s8_u32_x2(__VA_ARGS__) -#define svreinterpret_s8_s64_x2(...) __builtin_sve_reinterpret_s8_s64_x2(__VA_ARGS__) -#define svreinterpret_s8_u64_x2(...) __builtin_sve_reinterpret_s8_u64_x2(__VA_ARGS__) -#define svreinterpret_s8_f16_x2(...) __builtin_sve_reinterpret_s8_f16_x2(__VA_ARGS__) -#define svreinterpret_s8_bf16_x2(...) __builtin_sve_reinterpret_s8_bf16_x2(__VA_ARGS__) -#define svreinterpret_s8_f32_x2(...) __builtin_sve_reinterpret_s8_f32_x2(__VA_ARGS__) -#define svreinterpret_s8_f64_x2(...) __builtin_sve_reinterpret_s8_f64_x2(__VA_ARGS__) -#define svreinterpret_u8_s8_x2(...) __builtin_sve_reinterpret_u8_s8_x2(__VA_ARGS__) -#define svreinterpret_u8_u8_x2(...) __builtin_sve_reinterpret_u8_u8_x2(__VA_ARGS__) -#define svreinterpret_u8_s16_x2(...) __builtin_sve_reinterpret_u8_s16_x2(__VA_ARGS__) -#define svreinterpret_u8_u16_x2(...) __builtin_sve_reinterpret_u8_u16_x2(__VA_ARGS__) -#define svreinterpret_u8_s32_x2(...) __builtin_sve_reinterpret_u8_s32_x2(__VA_ARGS__) -#define svreinterpret_u8_u32_x2(...) __builtin_sve_reinterpret_u8_u32_x2(__VA_ARGS__) -#define svreinterpret_u8_s64_x2(...) __builtin_sve_reinterpret_u8_s64_x2(__VA_ARGS__) -#define svreinterpret_u8_u64_x2(...) __builtin_sve_reinterpret_u8_u64_x2(__VA_ARGS__) -#define svreinterpret_u8_f16_x2(...) __builtin_sve_reinterpret_u8_f16_x2(__VA_ARGS__) -#define svreinterpret_u8_bf16_x2(...) __builtin_sve_reinterpret_u8_bf16_x2(__VA_ARGS__) -#define svreinterpret_u8_f32_x2(...) __builtin_sve_reinterpret_u8_f32_x2(__VA_ARGS__) -#define svreinterpret_u8_f64_x2(...) __builtin_sve_reinterpret_u8_f64_x2(__VA_ARGS__) -#define svreinterpret_s16_s8_x2(...) __builtin_sve_reinterpret_s16_s8_x2(__VA_ARGS__) -#define svreinterpret_s16_u8_x2(...) __builtin_sve_reinterpret_s16_u8_x2(__VA_ARGS__) -#define svreinterpret_s16_s16_x2(...) __builtin_sve_reinterpret_s16_s16_x2(__VA_ARGS__) -#define svreinterpret_s16_u16_x2(...) __builtin_sve_reinterpret_s16_u16_x2(__VA_ARGS__) -#define svreinterpret_s16_s32_x2(...) __builtin_sve_reinterpret_s16_s32_x2(__VA_ARGS__) -#define svreinterpret_s16_u32_x2(...) __builtin_sve_reinterpret_s16_u32_x2(__VA_ARGS__) -#define svreinterpret_s16_s64_x2(...) __builtin_sve_reinterpret_s16_s64_x2(__VA_ARGS__) -#define svreinterpret_s16_u64_x2(...) __builtin_sve_reinterpret_s16_u64_x2(__VA_ARGS__) -#define svreinterpret_s16_f16_x2(...) __builtin_sve_reinterpret_s16_f16_x2(__VA_ARGS__) -#define svreinterpret_s16_bf16_x2(...) __builtin_sve_reinterpret_s16_bf16_x2(__VA_ARGS__) -#define svreinterpret_s16_f32_x2(...) __builtin_sve_reinterpret_s16_f32_x2(__VA_ARGS__) -#define svreinterpret_s16_f64_x2(...) __builtin_sve_reinterpret_s16_f64_x2(__VA_ARGS__) -#define svreinterpret_u16_s8_x2(...) __builtin_sve_reinterpret_u16_s8_x2(__VA_ARGS__) -#define svreinterpret_u16_u8_x2(...) __builtin_sve_reinterpret_u16_u8_x2(__VA_ARGS__) -#define svreinterpret_u16_s16_x2(...) __builtin_sve_reinterpret_u16_s16_x2(__VA_ARGS__) -#define svreinterpret_u16_u16_x2(...) __builtin_sve_reinterpret_u16_u16_x2(__VA_ARGS__) -#define svreinterpret_u16_s32_x2(...) __builtin_sve_reinterpret_u16_s32_x2(__VA_ARGS__) -#define svreinterpret_u16_u32_x2(...) __builtin_sve_reinterpret_u16_u32_x2(__VA_ARGS__) -#define svreinterpret_u16_s64_x2(...) __builtin_sve_reinterpret_u16_s64_x2(__VA_ARGS__) -#define svreinterpret_u16_u64_x2(...) __builtin_sve_reinterpret_u16_u64_x2(__VA_ARGS__) -#define svreinterpret_u16_f16_x2(...) 
__builtin_sve_reinterpret_u16_f16_x2(__VA_ARGS__) -#define svreinterpret_u16_bf16_x2(...) __builtin_sve_reinterpret_u16_bf16_x2(__VA_ARGS__) -#define svreinterpret_u16_f32_x2(...) __builtin_sve_reinterpret_u16_f32_x2(__VA_ARGS__) -#define svreinterpret_u16_f64_x2(...) __builtin_sve_reinterpret_u16_f64_x2(__VA_ARGS__) -#define svreinterpret_s32_s8_x2(...) __builtin_sve_reinterpret_s32_s8_x2(__VA_ARGS__) -#define svreinterpret_s32_u8_x2(...) __builtin_sve_reinterpret_s32_u8_x2(__VA_ARGS__) -#define svreinterpret_s32_s16_x2(...) __builtin_sve_reinterpret_s32_s16_x2(__VA_ARGS__) -#define svreinterpret_s32_u16_x2(...) __builtin_sve_reinterpret_s32_u16_x2(__VA_ARGS__) -#define svreinterpret_s32_s32_x2(...) __builtin_sve_reinterpret_s32_s32_x2(__VA_ARGS__) -#define svreinterpret_s32_u32_x2(...) __builtin_sve_reinterpret_s32_u32_x2(__VA_ARGS__) -#define svreinterpret_s32_s64_x2(...) __builtin_sve_reinterpret_s32_s64_x2(__VA_ARGS__) -#define svreinterpret_s32_u64_x2(...) __builtin_sve_reinterpret_s32_u64_x2(__VA_ARGS__) -#define svreinterpret_s32_f16_x2(...) __builtin_sve_reinterpret_s32_f16_x2(__VA_ARGS__) -#define svreinterpret_s32_bf16_x2(...) __builtin_sve_reinterpret_s32_bf16_x2(__VA_ARGS__) -#define svreinterpret_s32_f32_x2(...) __builtin_sve_reinterpret_s32_f32_x2(__VA_ARGS__) -#define svreinterpret_s32_f64_x2(...) __builtin_sve_reinterpret_s32_f64_x2(__VA_ARGS__) -#define svreinterpret_u32_s8_x2(...) __builtin_sve_reinterpret_u32_s8_x2(__VA_ARGS__) -#define svreinterpret_u32_u8_x2(...) __builtin_sve_reinterpret_u32_u8_x2(__VA_ARGS__) -#define svreinterpret_u32_s16_x2(...) __builtin_sve_reinterpret_u32_s16_x2(__VA_ARGS__) -#define svreinterpret_u32_u16_x2(...) __builtin_sve_reinterpret_u32_u16_x2(__VA_ARGS__) -#define svreinterpret_u32_s32_x2(...) __builtin_sve_reinterpret_u32_s32_x2(__VA_ARGS__) -#define svreinterpret_u32_u32_x2(...) __builtin_sve_reinterpret_u32_u32_x2(__VA_ARGS__) -#define svreinterpret_u32_s64_x2(...) __builtin_sve_reinterpret_u32_s64_x2(__VA_ARGS__) -#define svreinterpret_u32_u64_x2(...) __builtin_sve_reinterpret_u32_u64_x2(__VA_ARGS__) -#define svreinterpret_u32_f16_x2(...) __builtin_sve_reinterpret_u32_f16_x2(__VA_ARGS__) -#define svreinterpret_u32_bf16_x2(...) __builtin_sve_reinterpret_u32_bf16_x2(__VA_ARGS__) -#define svreinterpret_u32_f32_x2(...) __builtin_sve_reinterpret_u32_f32_x2(__VA_ARGS__) -#define svreinterpret_u32_f64_x2(...) __builtin_sve_reinterpret_u32_f64_x2(__VA_ARGS__) -#define svreinterpret_s64_s8_x2(...) __builtin_sve_reinterpret_s64_s8_x2(__VA_ARGS__) -#define svreinterpret_s64_u8_x2(...) __builtin_sve_reinterpret_s64_u8_x2(__VA_ARGS__) -#define svreinterpret_s64_s16_x2(...) __builtin_sve_reinterpret_s64_s16_x2(__VA_ARGS__) -#define svreinterpret_s64_u16_x2(...) __builtin_sve_reinterpret_s64_u16_x2(__VA_ARGS__) -#define svreinterpret_s64_s32_x2(...) __builtin_sve_reinterpret_s64_s32_x2(__VA_ARGS__) -#define svreinterpret_s64_u32_x2(...) __builtin_sve_reinterpret_s64_u32_x2(__VA_ARGS__) -#define svreinterpret_s64_s64_x2(...) __builtin_sve_reinterpret_s64_s64_x2(__VA_ARGS__) -#define svreinterpret_s64_u64_x2(...) __builtin_sve_reinterpret_s64_u64_x2(__VA_ARGS__) -#define svreinterpret_s64_f16_x2(...) __builtin_sve_reinterpret_s64_f16_x2(__VA_ARGS__) -#define svreinterpret_s64_bf16_x2(...) __builtin_sve_reinterpret_s64_bf16_x2(__VA_ARGS__) -#define svreinterpret_s64_f32_x2(...) __builtin_sve_reinterpret_s64_f32_x2(__VA_ARGS__) -#define svreinterpret_s64_f64_x2(...) __builtin_sve_reinterpret_s64_f64_x2(__VA_ARGS__) -#define svreinterpret_u64_s8_x2(...) 
__builtin_sve_reinterpret_u64_s8_x2(__VA_ARGS__) -#define svreinterpret_u64_u8_x2(...) __builtin_sve_reinterpret_u64_u8_x2(__VA_ARGS__) -#define svreinterpret_u64_s16_x2(...) __builtin_sve_reinterpret_u64_s16_x2(__VA_ARGS__) -#define svreinterpret_u64_u16_x2(...) __builtin_sve_reinterpret_u64_u16_x2(__VA_ARGS__) -#define svreinterpret_u64_s32_x2(...) __builtin_sve_reinterpret_u64_s32_x2(__VA_ARGS__) -#define svreinterpret_u64_u32_x2(...) __builtin_sve_reinterpret_u64_u32_x2(__VA_ARGS__) -#define svreinterpret_u64_s64_x2(...) __builtin_sve_reinterpret_u64_s64_x2(__VA_ARGS__) -#define svreinterpret_u64_u64_x2(...) __builtin_sve_reinterpret_u64_u64_x2(__VA_ARGS__) -#define svreinterpret_u64_f16_x2(...) __builtin_sve_reinterpret_u64_f16_x2(__VA_ARGS__) -#define svreinterpret_u64_bf16_x2(...) __builtin_sve_reinterpret_u64_bf16_x2(__VA_ARGS__) -#define svreinterpret_u64_f32_x2(...) __builtin_sve_reinterpret_u64_f32_x2(__VA_ARGS__) -#define svreinterpret_u64_f64_x2(...) __builtin_sve_reinterpret_u64_f64_x2(__VA_ARGS__) -#define svreinterpret_f16_s8_x2(...) __builtin_sve_reinterpret_f16_s8_x2(__VA_ARGS__) -#define svreinterpret_f16_u8_x2(...) __builtin_sve_reinterpret_f16_u8_x2(__VA_ARGS__) -#define svreinterpret_f16_s16_x2(...) __builtin_sve_reinterpret_f16_s16_x2(__VA_ARGS__) -#define svreinterpret_f16_u16_x2(...) __builtin_sve_reinterpret_f16_u16_x2(__VA_ARGS__) -#define svreinterpret_f16_s32_x2(...) __builtin_sve_reinterpret_f16_s32_x2(__VA_ARGS__) -#define svreinterpret_f16_u32_x2(...) __builtin_sve_reinterpret_f16_u32_x2(__VA_ARGS__) -#define svreinterpret_f16_s64_x2(...) __builtin_sve_reinterpret_f16_s64_x2(__VA_ARGS__) -#define svreinterpret_f16_u64_x2(...) __builtin_sve_reinterpret_f16_u64_x2(__VA_ARGS__) -#define svreinterpret_f16_f16_x2(...) __builtin_sve_reinterpret_f16_f16_x2(__VA_ARGS__) -#define svreinterpret_f16_bf16_x2(...) __builtin_sve_reinterpret_f16_bf16_x2(__VA_ARGS__) -#define svreinterpret_f16_f32_x2(...) __builtin_sve_reinterpret_f16_f32_x2(__VA_ARGS__) -#define svreinterpret_f16_f64_x2(...) __builtin_sve_reinterpret_f16_f64_x2(__VA_ARGS__) -#define svreinterpret_bf16_s8_x2(...) __builtin_sve_reinterpret_bf16_s8_x2(__VA_ARGS__) -#define svreinterpret_bf16_u8_x2(...) __builtin_sve_reinterpret_bf16_u8_x2(__VA_ARGS__) -#define svreinterpret_bf16_s16_x2(...) __builtin_sve_reinterpret_bf16_s16_x2(__VA_ARGS__) -#define svreinterpret_bf16_u16_x2(...) __builtin_sve_reinterpret_bf16_u16_x2(__VA_ARGS__) -#define svreinterpret_bf16_s32_x2(...) __builtin_sve_reinterpret_bf16_s32_x2(__VA_ARGS__) -#define svreinterpret_bf16_u32_x2(...) __builtin_sve_reinterpret_bf16_u32_x2(__VA_ARGS__) -#define svreinterpret_bf16_s64_x2(...) __builtin_sve_reinterpret_bf16_s64_x2(__VA_ARGS__) -#define svreinterpret_bf16_u64_x2(...) __builtin_sve_reinterpret_bf16_u64_x2(__VA_ARGS__) -#define svreinterpret_bf16_f16_x2(...) __builtin_sve_reinterpret_bf16_f16_x2(__VA_ARGS__) -#define svreinterpret_bf16_bf16_x2(...) __builtin_sve_reinterpret_bf16_bf16_x2(__VA_ARGS__) -#define svreinterpret_bf16_f32_x2(...) __builtin_sve_reinterpret_bf16_f32_x2(__VA_ARGS__) -#define svreinterpret_bf16_f64_x2(...) __builtin_sve_reinterpret_bf16_f64_x2(__VA_ARGS__) -#define svreinterpret_f32_s8_x2(...) __builtin_sve_reinterpret_f32_s8_x2(__VA_ARGS__) -#define svreinterpret_f32_u8_x2(...) __builtin_sve_reinterpret_f32_u8_x2(__VA_ARGS__) -#define svreinterpret_f32_s16_x2(...) __builtin_sve_reinterpret_f32_s16_x2(__VA_ARGS__) -#define svreinterpret_f32_u16_x2(...) 
__builtin_sve_reinterpret_f32_u16_x2(__VA_ARGS__) -#define svreinterpret_f32_s32_x2(...) __builtin_sve_reinterpret_f32_s32_x2(__VA_ARGS__) -#define svreinterpret_f32_u32_x2(...) __builtin_sve_reinterpret_f32_u32_x2(__VA_ARGS__) -#define svreinterpret_f32_s64_x2(...) __builtin_sve_reinterpret_f32_s64_x2(__VA_ARGS__) -#define svreinterpret_f32_u64_x2(...) __builtin_sve_reinterpret_f32_u64_x2(__VA_ARGS__) -#define svreinterpret_f32_f16_x2(...) __builtin_sve_reinterpret_f32_f16_x2(__VA_ARGS__) -#define svreinterpret_f32_bf16_x2(...) __builtin_sve_reinterpret_f32_bf16_x2(__VA_ARGS__) -#define svreinterpret_f32_f32_x2(...) __builtin_sve_reinterpret_f32_f32_x2(__VA_ARGS__) -#define svreinterpret_f32_f64_x2(...) __builtin_sve_reinterpret_f32_f64_x2(__VA_ARGS__) -#define svreinterpret_f64_s8_x2(...) __builtin_sve_reinterpret_f64_s8_x2(__VA_ARGS__) -#define svreinterpret_f64_u8_x2(...) __builtin_sve_reinterpret_f64_u8_x2(__VA_ARGS__) -#define svreinterpret_f64_s16_x2(...) __builtin_sve_reinterpret_f64_s16_x2(__VA_ARGS__) -#define svreinterpret_f64_u16_x2(...) __builtin_sve_reinterpret_f64_u16_x2(__VA_ARGS__) -#define svreinterpret_f64_s32_x2(...) __builtin_sve_reinterpret_f64_s32_x2(__VA_ARGS__) -#define svreinterpret_f64_u32_x2(...) __builtin_sve_reinterpret_f64_u32_x2(__VA_ARGS__) -#define svreinterpret_f64_s64_x2(...) __builtin_sve_reinterpret_f64_s64_x2(__VA_ARGS__) -#define svreinterpret_f64_u64_x2(...) __builtin_sve_reinterpret_f64_u64_x2(__VA_ARGS__) -#define svreinterpret_f64_f16_x2(...) __builtin_sve_reinterpret_f64_f16_x2(__VA_ARGS__) -#define svreinterpret_f64_bf16_x2(...) __builtin_sve_reinterpret_f64_bf16_x2(__VA_ARGS__) -#define svreinterpret_f64_f32_x2(...) __builtin_sve_reinterpret_f64_f32_x2(__VA_ARGS__) -#define svreinterpret_f64_f64_x2(...) 
__builtin_sve_reinterpret_f64_f64_x2(__VA_ARGS__) -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f16_x2(op); -} 
- -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint32x2_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_u16_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint16x2_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t 
svreinterpret_s64(svfloat64x2_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_f64_x2(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svint8x2_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_s8_x2(op);
-}
-
[... the same auto-generated stanza repeats for the remaining svreinterpret_u64 overloads and for the full svreinterpret_f16, svreinterpret_bf16, svreinterpret_f32, and svreinterpret_f64 groups: one overload per svXx2_t source type (s8, u8, s16, u16, s32, u32, s64, u64, f16, bf16, f32, f64), each forwarding to the matching __builtin_sve_reinterpret_<dst>_<src>_x2 builtin ...]
-
-#define svreinterpret_s8_s8_x3(...) __builtin_sve_reinterpret_s8_s8_x3(__VA_ARGS__)
-#define svreinterpret_s8_u8_x3(...) __builtin_sve_reinterpret_s8_u8_x3(__VA_ARGS__)
-#define svreinterpret_s8_s16_x3(...) __builtin_sve_reinterpret_s8_s16_x3(__VA_ARGS__)
[... one such #define svreinterpret_<dst>_<src>_x3 wrapper per destination/source element-type pair, with both <dst> and <src> iterating over s8, u8, s16, u16, s32, u32, s64, u64, f16, bf16, f32, f64, each expanding to the matching __builtin_sve_reinterpret_<dst>_<src>_x3 builtin ...]
-#define svreinterpret_f64_f32_x3(...) __builtin_sve_reinterpret_f64_f32_x3(__VA_ARGS__)
-#define svreinterpret_f64_f64_x3(...) __builtin_sve_reinterpret_f64_f64_x3(__VA_ARGS__)
-__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svint8x3_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s8_s8_x3(op);
-}
-
[... the same auto-generated pattern as for the x2 tuples: one svreinterpret_<dst> overload per svXx3_t source type, for every destination group svreinterpret_s8 through svreinterpret_f64 (s8, u8, s16, u16, s32, u32, s64, u64, f16, bf16, f32, f64), each forwarding to the matching __builtin_sve_reinterpret_<dst>_<src>_x3 builtin ...]
-
-#define svreinterpret_s8_s8_x4(...) __builtin_sve_reinterpret_s8_s8_x4(__VA_ARGS__)
-#define svreinterpret_s8_u8_x4(...) __builtin_sve_reinterpret_s8_u8_x4(__VA_ARGS__)
-#define svreinterpret_s8_s16_x4(...) __builtin_sve_reinterpret_s8_s16_x4(__VA_ARGS__)
[... one such #define svreinterpret_<dst>_<src>_x4 wrapper per destination/source element-type pair, with both <dst> and <src> iterating over s8, u8, s16, u16, s32, u32, s64, u64, f16, bf16, f32, f64, each expanding to the matching __builtin_sve_reinterpret_<dst>_<src>_x4 builtin ...]
-#define svreinterpret_f64_f32_x4(...) __builtin_sve_reinterpret_f64_f32_x4(__VA_ARGS__)
-#define svreinterpret_f64_f64_x4(...) __builtin_sve_reinterpret_f64_f64_x4(__VA_ARGS__)
-__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s8_s8_x4(op);
-}
-
[... identical auto-generated overloads continue for the svXx4_t tuple types: the full svreinterpret_s8, svreinterpret_u8, svreinterpret_s16, svreinterpret_u16, and svreinterpret_s32 destination groups, one overload per source type, plus the first svreinterpret_u32 overloads (svint8x4_t and svuint8x4_t sources), each forwarding to the matching __builtin_sve_reinterpret_<dst>_<src>_x4 builtin ...]
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint16x4_t op)
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u32_f64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_s8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_u8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_s16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_s64_f64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_s8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_u8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_s16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_u64_f64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_s8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_u8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_s16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f16_f64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_s8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_u8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_s16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_bf16_f64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_s8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_u8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_s16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f32_f64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_s8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint8x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_u8_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_s16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_u16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_s32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_u32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_s64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_u64_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_f16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svbfloat16x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_bf16_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svfloat32x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_f32_x4(op);
-}
-
-__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svfloat64x4_t op) __arm_streaming_compatible {
-  return __builtin_sve_reinterpret_f64_f64_x4(op);
-}
-
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m)))
-svbfloat16_t svadd_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x)))
-svbfloat16_t svadd_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z)))
-svbfloat16_t svadd_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m)))
-svbfloat16_t svadd_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x)))
-svbfloat16_t svadd_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z)))
-svbfloat16_t svadd_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16)))
-svbfloat16_t svclamp_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m)))
-svbfloat16_t svmax_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x)))
-svbfloat16_t svmax_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z)))
-svbfloat16_t svmax_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m)))
-svbfloat16_t svmax_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x)))
-svbfloat16_t svmax_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z)))
-svbfloat16_t svmax_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m)))
-svbfloat16_t svmaxnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x)))
-svbfloat16_t svmaxnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z)))
-svbfloat16_t svmaxnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m)))
-svbfloat16_t svmaxnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x)))
-svbfloat16_t svmaxnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z)))
-svbfloat16_t svmaxnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m)))
-svbfloat16_t svmin_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x)))
-svbfloat16_t svmin_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z)))
-svbfloat16_t svmin_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m)))
-svbfloat16_t svmin_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x)))
-svbfloat16_t svmin_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z)))
-svbfloat16_t svmin_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m)))
-svbfloat16_t svminnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x)))
-svbfloat16_t svminnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z)))
-svbfloat16_t svminnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m)))
-svbfloat16_t svminnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x)))
-svbfloat16_t svminnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z)))
-svbfloat16_t svminnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m)))
-svbfloat16_t svmla_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x)))
-svbfloat16_t svmla_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z)))
-svbfloat16_t svmla_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m)))
-svbfloat16_t svmla_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x)))
-svbfloat16_t svmla_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z)))
-svbfloat16_t svmla_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16)))
-svbfloat16_t svmla_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m)))
-svbfloat16_t svmls_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x)))
-svbfloat16_t svmls_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z)))
-svbfloat16_t svmls_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m)))
-svbfloat16_t svmls_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x)))
-svbfloat16_t svmls_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z)))
-svbfloat16_t svmls_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16)))
-svbfloat16_t svmls_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m)))
-svbfloat16_t svmul_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x)))
-svbfloat16_t svmul_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z)))
-svbfloat16_t svmul_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m)))
-svbfloat16_t svmul_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x)))
-svbfloat16_t svmul_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z)))
-svbfloat16_t svmul_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16)))
-svbfloat16_t svmul_lane_bf16(svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m)))
-svbfloat16_t svsub_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x)))
-svbfloat16_t svsub_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z)))
-svbfloat16_t svsub_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m)))
-svbfloat16_t svsub_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x)))
-svbfloat16_t svsub_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z)))
-svbfloat16_t svsub_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m)))
-svbfloat16_t svadd_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x)))
-svbfloat16_t svadd_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z)))
-svbfloat16_t svadd_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m)))
-svbfloat16_t svadd_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x)))
-svbfloat16_t svadd_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z)))
-svbfloat16_t svadd_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16)))
-svbfloat16_t svclamp(svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m)))
-svbfloat16_t svmax_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x)))
-svbfloat16_t svmax_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z)))
-svbfloat16_t svmax_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m)))
-svbfloat16_t svmax_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x)))
-svbfloat16_t svmax_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z)))
-svbfloat16_t svmax_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m)))
-svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x)))
-svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z)))
-svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m)))
-svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x)))
-svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z)))
-svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m)))
-svbfloat16_t svmin_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x)))
-svbfloat16_t svmin_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z)))
-svbfloat16_t svmin_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m)))
-svbfloat16_t svmin_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x)))
-svbfloat16_t svmin_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z)))
-svbfloat16_t svmin_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m)))
-svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x)))
-svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z)))
-svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m)))
-svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x)))
-svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z)))
-svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m)))
-svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x)))
-svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z)))
-svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m)))
-svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x)))
-svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z)))
-svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16)))
-svbfloat16_t svmla_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m)))
-svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x)))
-svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z)))
-svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m)))
-svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x)))
-svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z)))
-svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16)))
-svbfloat16_t svmls_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m)))
-svbfloat16_t svmul_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x)))
-svbfloat16_t svmul_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z)))
-svbfloat16_t svmul_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m)))
-svbfloat16_t svmul_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x)))
-svbfloat16_t svmul_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z)))
-svbfloat16_t svmul_z(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16)))
-svbfloat16_t svmul_lane(svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m)))
-svbfloat16_t svsub_m(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x)))
-svbfloat16_t svsub_x(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z)))
-svbfloat16_t svsub_z(svbool_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m)))
-svbfloat16_t svsub_m(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x)))
-svbfloat16_t svsub_x(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z)))
-svbfloat16_t svsub_z(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8)))
+svint8_t svreinterpret_s8_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8)))
+svint8_t svreinterpret_s8_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16)))
+svint8_t svreinterpret_s8_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16)))
+svint8_t svreinterpret_s8_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32)))
+svint8_t svreinterpret_s8_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32)))
+svint8_t svreinterpret_s8_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64)))
+svint8_t svreinterpret_s8_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64)))
+svint8_t svreinterpret_s8_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16)))
+svint8_t svreinterpret_s8_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16)))
+svint8_t svreinterpret_s8_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32)))
+svint8_t svreinterpret_s8_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64)))
+svint8_t svreinterpret_s8_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8)))
+svuint8_t svreinterpret_u8_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8)))
+svuint8_t svreinterpret_u8_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16)))
+svuint8_t svreinterpret_u8_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16)))
+svuint8_t svreinterpret_u8_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32)))
+svuint8_t svreinterpret_u8_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32)))
+svuint8_t svreinterpret_u8_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64)))
+svuint8_t svreinterpret_u8_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64)))
+svuint8_t svreinterpret_u8_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16)))
+svuint8_t svreinterpret_u8_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16)))
+svuint8_t svreinterpret_u8_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32)))
+svuint8_t svreinterpret_u8_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64)))
+svuint8_t svreinterpret_u8_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8)))
+svint16_t svreinterpret_s16_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8)))
+svint16_t svreinterpret_s16_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16)))
+svint16_t svreinterpret_s16_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16)))
+svint16_t svreinterpret_s16_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32)))
+svint16_t svreinterpret_s16_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32)))
+svint16_t svreinterpret_s16_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64)))
+svint16_t svreinterpret_s16_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64)))
+svint16_t svreinterpret_s16_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16)))
+svint16_t svreinterpret_s16_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16)))
+svint16_t svreinterpret_s16_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32)))
+svint16_t svreinterpret_s16_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64)))
+svint16_t svreinterpret_s16_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8)))
+svuint16_t svreinterpret_u16_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8)))
+svuint16_t svreinterpret_u16_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16)))
+svuint16_t svreinterpret_u16_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16)))
+svuint16_t svreinterpret_u16_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32)))
+svuint16_t svreinterpret_u16_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32)))
+svuint16_t svreinterpret_u16_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64)))
+svuint16_t svreinterpret_u16_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64)))
+svuint16_t svreinterpret_u16_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16)))
+svuint16_t svreinterpret_u16_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16)))
+svuint16_t svreinterpret_u16_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32)))
+svuint16_t svreinterpret_u16_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64)))
+svuint16_t svreinterpret_u16_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8)))
+svint32_t svreinterpret_s32_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8)))
+svint32_t svreinterpret_s32_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16)))
+svint32_t svreinterpret_s32_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16)))
+svint32_t svreinterpret_s32_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32)))
+svint32_t svreinterpret_s32_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32)))
+svint32_t svreinterpret_s32_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64)))
+svint32_t svreinterpret_s32_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64)))
+svint32_t svreinterpret_s32_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16)))
+svint32_t svreinterpret_s32_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16)))
+svint32_t svreinterpret_s32_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32)))
+svint32_t svreinterpret_s32_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64)))
+svint32_t svreinterpret_s32_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8)))
+svuint32_t svreinterpret_u32_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8)))
+svuint32_t svreinterpret_u32_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16)))
+svuint32_t svreinterpret_u32_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16)))
+svuint32_t svreinterpret_u32_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32)))
+svuint32_t svreinterpret_u32_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32)))
+svuint32_t svreinterpret_u32_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64)))
+svuint32_t svreinterpret_u32_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64)))
+svuint32_t svreinterpret_u32_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16)))
+svuint32_t svreinterpret_u32_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16)))
+svuint32_t svreinterpret_u32_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32)))
+svuint32_t svreinterpret_u32_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64)))
+svuint32_t svreinterpret_u32_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8)))
+svint64_t svreinterpret_s64_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8)))
+svint64_t svreinterpret_s64_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16)))
+svint64_t svreinterpret_s64_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16)))
+svint64_t svreinterpret_s64_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32)))
+svint64_t svreinterpret_s64_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32)))
+svint64_t svreinterpret_s64_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64)))
+svint64_t svreinterpret_s64_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64)))
+svint64_t svreinterpret_s64_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16)))
+svint64_t svreinterpret_s64_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16)))
+svint64_t svreinterpret_s64_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32)))
+svint64_t svreinterpret_s64_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64)))
+svint64_t svreinterpret_s64_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8)))
+svuint64_t svreinterpret_u64_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8)))
+svuint64_t svreinterpret_u64_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16)))
+svuint64_t svreinterpret_u64_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16)))
+svuint64_t svreinterpret_u64_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32)))
+svuint64_t svreinterpret_u64_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32)))
+svuint64_t svreinterpret_u64_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64)))
+svuint64_t svreinterpret_u64_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64)))
+svuint64_t svreinterpret_u64_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16)))
+svuint64_t svreinterpret_u64_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16)))
+svuint64_t svreinterpret_u64_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32)))
+svuint64_t svreinterpret_u64_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64)))
+svuint64_t svreinterpret_u64_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8)))
+svfloat16_t svreinterpret_f16_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8)))
+svfloat16_t svreinterpret_f16_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16)))
+svfloat16_t svreinterpret_f16_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16)))
+svfloat16_t svreinterpret_f16_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32)))
+svfloat16_t svreinterpret_f16_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32)))
+svfloat16_t svreinterpret_f16_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64)))
+svfloat16_t svreinterpret_f16_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64)))
+svfloat16_t svreinterpret_f16_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16)))
+svfloat16_t svreinterpret_f16_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16)))
+svfloat16_t svreinterpret_f16_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32)))
+svfloat16_t svreinterpret_f16_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64)))
+svfloat16_t svreinterpret_f16_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8)))
+svbfloat16_t svreinterpret_bf16_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8)))
+svbfloat16_t svreinterpret_bf16_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16)))
+svbfloat16_t svreinterpret_bf16_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16)))
+svbfloat16_t svreinterpret_bf16_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32)))
+svbfloat16_t svreinterpret_bf16_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32)))
+svbfloat16_t svreinterpret_bf16_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64)))
+svbfloat16_t svreinterpret_bf16_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64)))
+svbfloat16_t svreinterpret_bf16_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16)))
+svbfloat16_t svreinterpret_bf16_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16)))
+svbfloat16_t svreinterpret_bf16_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32)))
+svbfloat16_t svreinterpret_bf16_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64)))
+svbfloat16_t svreinterpret_bf16_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8)))
+svfloat32_t svreinterpret_f32_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8)))
+svfloat32_t svreinterpret_f32_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16)))
+svfloat32_t svreinterpret_f32_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16)))
+svfloat32_t svreinterpret_f32_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32)))
+svfloat32_t svreinterpret_f32_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32)))
+svfloat32_t svreinterpret_f32_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64)))
+svfloat32_t svreinterpret_f32_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64)))
+svfloat32_t svreinterpret_f32_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16)))
+svfloat32_t svreinterpret_f32_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16)))
+svfloat32_t svreinterpret_f32_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32)))
+svfloat32_t svreinterpret_f32_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64)))
+svfloat32_t svreinterpret_f32_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8)))
+svfloat64_t svreinterpret_f64_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8)))
+svfloat64_t svreinterpret_f64_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16)))
+svfloat64_t svreinterpret_f64_s16(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16)))
+svfloat64_t svreinterpret_f64_u16(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32)))
+svfloat64_t svreinterpret_f64_s32(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32)))
+svfloat64_t svreinterpret_f64_u32(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64)))
+svfloat64_t svreinterpret_f64_s64(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64)))
+svfloat64_t svreinterpret_f64_u64(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16)))
+svfloat64_t svreinterpret_f64_f16(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16)))
+svfloat64_t svreinterpret_f64_bf16(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32)))
+svfloat64_t svreinterpret_f64_f32(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64)))
+svfloat64_t svreinterpret_f64_f64(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8)))
+svint8_t svreinterpret_s8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8)))
+svint8_t svreinterpret_s8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16)))
+svint8_t svreinterpret_s8(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16)))
+svint8_t svreinterpret_s8(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32)))
+svint8_t svreinterpret_s8(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32)))
+svint8_t svreinterpret_s8(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64)))
+svint8_t svreinterpret_s8(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64)))
+svint8_t svreinterpret_s8(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16)))
+svint8_t svreinterpret_s8(svfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16)))
+svint8_t svreinterpret_s8(svbfloat16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32)))
+svint8_t svreinterpret_s8(svfloat32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64)))
+svint8_t svreinterpret_s8(svfloat64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8)))
+svuint8_t svreinterpret_u8(svint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8)))
+svuint8_t svreinterpret_u8(svuint8_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16)))
+svuint8_t svreinterpret_u8(svint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16)))
+svuint8_t svreinterpret_u8(svuint16_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32)))
+svuint8_t svreinterpret_u8(svint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32)))
+svuint8_t svreinterpret_u8(svuint32_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64)))
+svuint8_t svreinterpret_u8(svint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64)))
+svuint8_t svreinterpret_u8(svuint64_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16)))
+svuint8_t svreinterpret_u8(svfloat16_t op);
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16))) +svuint8_t svreinterpret_u8(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16))) +svuint8_t svreinterpret_u8(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32))) +svuint8_t svreinterpret_u8(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64))) +svuint8_t svreinterpret_u8(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8))) +svint16_t svreinterpret_s16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8))) +svint16_t svreinterpret_s16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16))) +svint16_t svreinterpret_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16))) +svint16_t svreinterpret_s16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32))) +svint16_t svreinterpret_s16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32))) +svint16_t svreinterpret_s16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64))) +svint16_t svreinterpret_s16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64))) +svint16_t svreinterpret_s16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16))) +svint16_t svreinterpret_s16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16))) +svint16_t svreinterpret_s16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32))) +svint16_t svreinterpret_s16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64))) +svint16_t svreinterpret_s16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8))) +svuint16_t svreinterpret_u16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8))) +svuint16_t svreinterpret_u16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16))) +svuint16_t svreinterpret_u16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16))) +svuint16_t svreinterpret_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32))) +svuint16_t svreinterpret_u16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32))) +svuint16_t svreinterpret_u16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64))) +svuint16_t svreinterpret_u16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64))) +svuint16_t svreinterpret_u16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16))) +svuint16_t svreinterpret_u16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16))) +svuint16_t svreinterpret_u16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32))) +svuint16_t svreinterpret_u16(svfloat32_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64))) +svuint16_t svreinterpret_u16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8))) +svint32_t svreinterpret_s32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8))) +svint32_t svreinterpret_s32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16))) +svint32_t svreinterpret_s32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16))) +svint32_t svreinterpret_s32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32))) +svint32_t svreinterpret_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32))) +svint32_t svreinterpret_s32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64))) +svint32_t svreinterpret_s32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64))) +svint32_t svreinterpret_s32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16))) +svint32_t svreinterpret_s32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16))) +svint32_t svreinterpret_s32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32))) +svint32_t svreinterpret_s32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64))) +svint32_t svreinterpret_s32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8))) +svuint32_t svreinterpret_u32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8))) +svuint32_t svreinterpret_u32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16))) +svuint32_t svreinterpret_u32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16))) +svuint32_t svreinterpret_u32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32))) +svuint32_t svreinterpret_u32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32))) +svuint32_t svreinterpret_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64))) +svuint32_t svreinterpret_u32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64))) +svuint32_t svreinterpret_u32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16))) +svuint32_t svreinterpret_u32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16))) +svuint32_t svreinterpret_u32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32))) +svuint32_t svreinterpret_u32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64))) +svuint32_t svreinterpret_u32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8))) +svint64_t svreinterpret_s64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8))) +svint64_t svreinterpret_s64(svuint8_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16))) +svint64_t svreinterpret_s64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16))) +svint64_t svreinterpret_s64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32))) +svint64_t svreinterpret_s64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32))) +svint64_t svreinterpret_s64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64))) +svint64_t svreinterpret_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64))) +svint64_t svreinterpret_s64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16))) +svint64_t svreinterpret_s64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16))) +svint64_t svreinterpret_s64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32))) +svint64_t svreinterpret_s64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64))) +svint64_t svreinterpret_s64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8))) +svuint64_t svreinterpret_u64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8))) +svuint64_t svreinterpret_u64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16))) +svuint64_t svreinterpret_u64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16))) +svuint64_t svreinterpret_u64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32))) +svuint64_t svreinterpret_u64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32))) +svuint64_t svreinterpret_u64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64))) +svuint64_t svreinterpret_u64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64))) +svuint64_t svreinterpret_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16))) +svuint64_t svreinterpret_u64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16))) +svuint64_t svreinterpret_u64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32))) +svuint64_t svreinterpret_u64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64))) +svuint64_t svreinterpret_u64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8))) +svfloat16_t svreinterpret_f16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8))) +svfloat16_t svreinterpret_f16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16))) +svfloat16_t svreinterpret_f16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16))) +svfloat16_t svreinterpret_f16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32))) +svfloat16_t svreinterpret_f16(svint32_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32))) +svfloat16_t svreinterpret_f16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64))) +svfloat16_t svreinterpret_f16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64))) +svfloat16_t svreinterpret_f16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16))) +svfloat16_t svreinterpret_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16))) +svfloat16_t svreinterpret_f16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32))) +svfloat16_t svreinterpret_f16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64))) +svfloat16_t svreinterpret_f16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8))) +svbfloat16_t svreinterpret_bf16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8))) +svbfloat16_t svreinterpret_bf16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16))) +svbfloat16_t svreinterpret_bf16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16))) +svbfloat16_t svreinterpret_bf16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32))) +svbfloat16_t svreinterpret_bf16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32))) +svbfloat16_t svreinterpret_bf16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64))) +svbfloat16_t svreinterpret_bf16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64))) +svbfloat16_t svreinterpret_bf16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16))) +svbfloat16_t svreinterpret_bf16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16))) +svbfloat16_t svreinterpret_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32))) +svbfloat16_t svreinterpret_bf16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64))) +svbfloat16_t svreinterpret_bf16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8))) +svfloat32_t svreinterpret_f32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8))) +svfloat32_t svreinterpret_f32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16))) +svfloat32_t svreinterpret_f32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16))) +svfloat32_t svreinterpret_f32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32))) +svfloat32_t svreinterpret_f32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32))) +svfloat32_t svreinterpret_f32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64))) +svfloat32_t svreinterpret_f32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64))) +svfloat32_t 
svreinterpret_f32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16))) +svfloat32_t svreinterpret_f32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16))) +svfloat32_t svreinterpret_f32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32))) +svfloat32_t svreinterpret_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64))) +svfloat32_t svreinterpret_f32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8))) +svfloat64_t svreinterpret_f64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8))) +svfloat64_t svreinterpret_f64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16))) +svfloat64_t svreinterpret_f64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16))) +svfloat64_t svreinterpret_f64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32))) +svfloat64_t svreinterpret_f64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32))) +svfloat64_t svreinterpret_f64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64))) +svfloat64_t svreinterpret_f64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64))) +svfloat64_t svreinterpret_f64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16))) +svfloat64_t svreinterpret_f64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16))) +svfloat64_t svreinterpret_f64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32))) +svfloat64_t svreinterpret_f64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64))) +svfloat64_t svreinterpret_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x2))) +svint8x2_t svreinterpret_s8_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x2))) +svint8x2_t svreinterpret_s8_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x2))) +svint8x2_t svreinterpret_s8_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x2))) +svint8x2_t svreinterpret_s8_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x2))) +svint8x2_t svreinterpret_s8_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x2))) +svint8x2_t svreinterpret_s8_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x2))) +svint8x2_t svreinterpret_s8_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x2))) +svint8x2_t svreinterpret_s8_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x2))) +svint8x2_t svreinterpret_s8_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x2))) +svint8x2_t svreinterpret_s8_bf16_x2(svbfloat16x2_t op); 
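Aside (not part of the patch): at this point the hunk has begun declaring the _x2 tuple variants (with _x3 forms further down), which reinterpret whole multi-vector tuples such as svfloat32x2_t in one call rather than one vector at a time. A hedged sketch under the same toolchain assumption as above, with a hypothetical wrapper name:

    #include <arm_sve.h>

    /* Reinterpret a two-vector tuple in one call; each constituent
       vector's bits are reused as unsigned bytes.
       (Illustrative helper, not from the patch.) */
    svuint8x2_t pair_as_bytes(svfloat32x2_t pair) {
        return svreinterpret_u8_f32_x2(pair);
    }
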
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x2))) +svint8x2_t svreinterpret_s8_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x2))) +svint8x2_t svreinterpret_s8_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x2))) +svuint8x2_t svreinterpret_u8_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x2))) +svuint8x2_t svreinterpret_u8_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x2))) +svuint8x2_t svreinterpret_u8_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x2))) +svuint8x2_t svreinterpret_u8_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x2))) +svuint8x2_t svreinterpret_u8_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x2))) +svuint8x2_t svreinterpret_u8_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x2))) +svuint8x2_t svreinterpret_u8_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x2))) +svuint8x2_t svreinterpret_u8_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x2))) +svuint8x2_t svreinterpret_u8_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x2))) +svuint8x2_t svreinterpret_u8_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x2))) +svuint8x2_t svreinterpret_u8_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x2))) +svuint8x2_t svreinterpret_u8_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x2))) +svint16x2_t svreinterpret_s16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x2))) +svint16x2_t svreinterpret_s16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x2))) +svint16x2_t svreinterpret_s16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x2))) +svint16x2_t svreinterpret_s16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x2))) +svint16x2_t svreinterpret_s16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x2))) +svint16x2_t svreinterpret_s16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x2))) +svint16x2_t svreinterpret_s16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x2))) +svint16x2_t svreinterpret_s16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x2))) +svint16x2_t svreinterpret_s16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x2))) +svint16x2_t svreinterpret_s16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x2))) +svint16x2_t 
svreinterpret_s16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x2))) +svint16x2_t svreinterpret_s16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x2))) +svuint16x2_t svreinterpret_u16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x2))) +svuint16x2_t svreinterpret_u16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x2))) +svuint16x2_t svreinterpret_u16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x2))) +svuint16x2_t svreinterpret_u16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x2))) +svuint16x2_t svreinterpret_u16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x2))) +svuint16x2_t svreinterpret_u16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x2))) +svuint16x2_t svreinterpret_u16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x2))) +svuint16x2_t svreinterpret_u16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x2))) +svuint16x2_t svreinterpret_u16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x2))) +svuint16x2_t svreinterpret_u16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x2))) +svuint16x2_t svreinterpret_u16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x2))) +svuint16x2_t svreinterpret_u16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x2))) +svint32x2_t svreinterpret_s32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x2))) +svint32x2_t svreinterpret_s32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x2))) +svint32x2_t svreinterpret_s32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x2))) +svint32x2_t svreinterpret_s32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x2))) +svint32x2_t svreinterpret_s32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x2))) +svint32x2_t svreinterpret_s32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x2))) +svint32x2_t svreinterpret_s32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x2))) +svint32x2_t svreinterpret_s32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x2))) +svint32x2_t svreinterpret_s32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x2))) +svint32x2_t svreinterpret_s32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x2))) +svint32x2_t svreinterpret_s32_f32_x2(svfloat32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x2))) +svint32x2_t svreinterpret_s32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x2))) +svuint32x2_t svreinterpret_u32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x2))) +svuint32x2_t svreinterpret_u32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x2))) +svuint32x2_t svreinterpret_u32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x2))) +svuint32x2_t svreinterpret_u32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x2))) +svuint32x2_t svreinterpret_u32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x2))) +svuint32x2_t svreinterpret_u32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x2))) +svuint32x2_t svreinterpret_u32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x2))) +svuint32x2_t svreinterpret_u32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x2))) +svuint32x2_t svreinterpret_u32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x2))) +svuint32x2_t svreinterpret_u32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x2))) +svuint32x2_t svreinterpret_u32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x2))) +svuint32x2_t svreinterpret_u32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x2))) +svint64x2_t svreinterpret_s64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x2))) +svint64x2_t svreinterpret_s64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x2))) +svint64x2_t svreinterpret_s64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x2))) +svint64x2_t svreinterpret_s64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x2))) +svint64x2_t svreinterpret_s64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x2))) +svint64x2_t svreinterpret_s64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x2))) +svint64x2_t svreinterpret_s64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x2))) +svint64x2_t svreinterpret_s64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x2))) +svint64x2_t svreinterpret_s64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x2))) +svint64x2_t svreinterpret_s64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x2))) +svint64x2_t svreinterpret_s64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x2))) 
+svint64x2_t svreinterpret_s64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x2))) +svuint64x2_t svreinterpret_u64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x2))) +svuint64x2_t svreinterpret_u64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x2))) +svuint64x2_t svreinterpret_u64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x2))) +svuint64x2_t svreinterpret_u64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x2))) +svuint64x2_t svreinterpret_u64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x2))) +svuint64x2_t svreinterpret_u64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x2))) +svuint64x2_t svreinterpret_u64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x2))) +svuint64x2_t svreinterpret_u64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x2))) +svuint64x2_t svreinterpret_u64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x2))) +svuint64x2_t svreinterpret_u64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x2))) +svuint64x2_t svreinterpret_u64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x2))) +svuint64x2_t svreinterpret_u64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x2))) +svfloat16x2_t svreinterpret_f16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x2))) +svfloat16x2_t svreinterpret_f16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x2))) +svfloat16x2_t svreinterpret_f16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x2))) +svfloat16x2_t svreinterpret_f16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x2))) +svfloat16x2_t svreinterpret_f16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x2))) +svfloat16x2_t svreinterpret_f16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x2))) +svfloat16x2_t svreinterpret_f16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x2))) +svfloat16x2_t svreinterpret_f16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x2))) +svfloat16x2_t svreinterpret_f16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x2))) +svfloat16x2_t svreinterpret_f16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x2))) +svfloat16x2_t svreinterpret_f16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x2))) +svfloat16x2_t svreinterpret_f16_f64_x2(svfloat64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x2))) +svbfloat16x2_t svreinterpret_bf16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x2))) +svbfloat16x2_t svreinterpret_bf16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x2))) +svbfloat16x2_t svreinterpret_bf16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x2))) +svbfloat16x2_t svreinterpret_bf16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x2))) +svbfloat16x2_t svreinterpret_bf16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x2))) +svbfloat16x2_t svreinterpret_bf16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x2))) +svbfloat16x2_t svreinterpret_bf16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x2))) +svbfloat16x2_t svreinterpret_bf16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x2))) +svbfloat16x2_t svreinterpret_bf16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x2))) +svbfloat16x2_t svreinterpret_bf16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x2))) +svbfloat16x2_t svreinterpret_bf16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x2))) +svbfloat16x2_t svreinterpret_bf16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x2))) +svfloat32x2_t svreinterpret_f32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x2))) +svfloat32x2_t svreinterpret_f32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x2))) +svfloat32x2_t svreinterpret_f32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x2))) +svfloat32x2_t svreinterpret_f32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x2))) +svfloat32x2_t svreinterpret_f32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x2))) +svfloat32x2_t svreinterpret_f32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x2))) +svfloat32x2_t svreinterpret_f32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x2))) +svfloat32x2_t svreinterpret_f32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x2))) +svfloat32x2_t svreinterpret_f32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x2))) +svfloat32x2_t svreinterpret_f32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x2))) +svfloat32x2_t svreinterpret_f32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x2))) +svfloat32x2_t svreinterpret_f32_f64_x2(svfloat64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x2))) +svfloat64x2_t svreinterpret_f64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x2))) +svfloat64x2_t svreinterpret_f64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x2))) +svfloat64x2_t svreinterpret_f64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x2))) +svfloat64x2_t svreinterpret_f64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x2))) +svfloat64x2_t svreinterpret_f64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x2))) +svfloat64x2_t svreinterpret_f64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x2))) +svfloat64x2_t svreinterpret_f64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x2))) +svfloat64x2_t svreinterpret_f64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x2))) +svfloat64x2_t svreinterpret_f64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x2))) +svfloat64x2_t svreinterpret_f64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x2))) +svfloat64x2_t svreinterpret_f64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x2))) +svfloat64x2_t svreinterpret_f64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x2))) +svint8x2_t svreinterpret_s8(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x2))) +svint8x2_t svreinterpret_s8(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x2))) +svint8x2_t svreinterpret_s8(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x2))) +svint8x2_t svreinterpret_s8(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x2))) +svint8x2_t svreinterpret_s8(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x2))) +svint8x2_t svreinterpret_s8(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x2))) +svint8x2_t svreinterpret_s8(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x2))) +svint8x2_t svreinterpret_s8(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x2))) +svint8x2_t svreinterpret_s8(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x2))) +svint8x2_t svreinterpret_s8(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x2))) +svint8x2_t svreinterpret_s8(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x2))) +svint8x2_t svreinterpret_s8(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x2))) +svuint8x2_t svreinterpret_u8(svint8x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x2))) +svuint8x2_t svreinterpret_u8(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x2))) +svuint8x2_t svreinterpret_u8(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x2))) +svuint8x2_t svreinterpret_u8(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x2))) +svuint8x2_t svreinterpret_u8(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x2))) +svuint8x2_t svreinterpret_u8(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x2))) +svuint8x2_t svreinterpret_u8(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x2))) +svuint8x2_t svreinterpret_u8(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x2))) +svuint8x2_t svreinterpret_u8(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x2))) +svuint8x2_t svreinterpret_u8(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x2))) +svuint8x2_t svreinterpret_u8(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x2))) +svuint8x2_t svreinterpret_u8(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x2))) +svint16x2_t svreinterpret_s16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x2))) +svint16x2_t svreinterpret_s16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x2))) +svint16x2_t svreinterpret_s16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x2))) +svint16x2_t svreinterpret_s16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x2))) +svint16x2_t svreinterpret_s16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x2))) +svint16x2_t svreinterpret_s16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x2))) +svint16x2_t svreinterpret_s16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x2))) +svint16x2_t svreinterpret_s16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x2))) +svint16x2_t svreinterpret_s16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x2))) +svint16x2_t svreinterpret_s16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x2))) +svint16x2_t svreinterpret_s16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x2))) +svint16x2_t svreinterpret_s16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x2))) +svuint16x2_t svreinterpret_u16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x2))) +svuint16x2_t svreinterpret_u16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x2))) +svuint16x2_t svreinterpret_u16(svint16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x2))) +svuint16x2_t svreinterpret_u16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x2))) +svuint16x2_t svreinterpret_u16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x2))) +svuint16x2_t svreinterpret_u16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x2))) +svuint16x2_t svreinterpret_u16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x2))) +svuint16x2_t svreinterpret_u16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x2))) +svuint16x2_t svreinterpret_u16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x2))) +svuint16x2_t svreinterpret_u16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x2))) +svuint16x2_t svreinterpret_u16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x2))) +svuint16x2_t svreinterpret_u16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x2))) +svint32x2_t svreinterpret_s32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x2))) +svint32x2_t svreinterpret_s32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x2))) +svint32x2_t svreinterpret_s32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x2))) +svint32x2_t svreinterpret_s32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x2))) +svint32x2_t svreinterpret_s32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x2))) +svint32x2_t svreinterpret_s32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x2))) +svint32x2_t svreinterpret_s32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x2))) +svint32x2_t svreinterpret_s32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x2))) +svint32x2_t svreinterpret_s32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x2))) +svint32x2_t svreinterpret_s32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x2))) +svint32x2_t svreinterpret_s32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x2))) +svint32x2_t svreinterpret_s32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x2))) +svuint32x2_t svreinterpret_u32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x2))) +svuint32x2_t svreinterpret_u32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x2))) +svuint32x2_t svreinterpret_u32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x2))) +svuint32x2_t svreinterpret_u32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x2))) +svuint32x2_t 
svreinterpret_u32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x2))) +svuint32x2_t svreinterpret_u32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x2))) +svuint32x2_t svreinterpret_u32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x2))) +svuint32x2_t svreinterpret_u32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x2))) +svuint32x2_t svreinterpret_u32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x2))) +svuint32x2_t svreinterpret_u32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x2))) +svuint32x2_t svreinterpret_u32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x2))) +svuint32x2_t svreinterpret_u32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x2))) +svint64x2_t svreinterpret_s64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x2))) +svint64x2_t svreinterpret_s64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x2))) +svint64x2_t svreinterpret_s64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x2))) +svint64x2_t svreinterpret_s64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x2))) +svint64x2_t svreinterpret_s64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x2))) +svint64x2_t svreinterpret_s64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x2))) +svint64x2_t svreinterpret_s64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x2))) +svint64x2_t svreinterpret_s64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x2))) +svint64x2_t svreinterpret_s64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x2))) +svint64x2_t svreinterpret_s64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x2))) +svint64x2_t svreinterpret_s64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x2))) +svint64x2_t svreinterpret_s64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x2))) +svuint64x2_t svreinterpret_u64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x2))) +svuint64x2_t svreinterpret_u64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x2))) +svuint64x2_t svreinterpret_u64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x2))) +svuint64x2_t svreinterpret_u64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x2))) +svuint64x2_t svreinterpret_u64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x2))) +svuint64x2_t svreinterpret_u64(svuint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x2))) +svuint64x2_t svreinterpret_u64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x2))) +svuint64x2_t svreinterpret_u64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x2))) +svuint64x2_t svreinterpret_u64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x2))) +svuint64x2_t svreinterpret_u64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x2))) +svuint64x2_t svreinterpret_u64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x2))) +svuint64x2_t svreinterpret_u64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x2))) +svfloat16x2_t svreinterpret_f16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x2))) +svfloat16x2_t svreinterpret_f16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x2))) +svfloat16x2_t svreinterpret_f16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x2))) +svfloat16x2_t svreinterpret_f16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x2))) +svfloat16x2_t svreinterpret_f16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x2))) +svfloat16x2_t svreinterpret_f16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x2))) +svfloat16x2_t svreinterpret_f16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x2))) +svfloat16x2_t svreinterpret_f16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x2))) +svfloat16x2_t svreinterpret_f16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x2))) +svfloat16x2_t svreinterpret_f16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x2))) +svfloat16x2_t svreinterpret_f16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x2))) +svfloat16x2_t svreinterpret_f16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x2))) +svbfloat16x2_t svreinterpret_bf16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x2))) +svbfloat16x2_t svreinterpret_bf16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x2))) +svbfloat16x2_t svreinterpret_bf16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x2))) +svbfloat16x2_t svreinterpret_bf16(svint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x2)))
+svbfloat16x2_t svreinterpret_bf16(svuint64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x2)))
+svbfloat16x2_t svreinterpret_bf16(svfloat16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x2)))
+svbfloat16x2_t svreinterpret_bf16(svbfloat16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x2)))
+svbfloat16x2_t svreinterpret_bf16(svfloat32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x2)))
+svbfloat16x2_t svreinterpret_bf16(svfloat64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x2)))
+svfloat32x2_t svreinterpret_f32(svint8x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x2)))
+svfloat32x2_t svreinterpret_f32(svuint8x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x2)))
+svfloat32x2_t svreinterpret_f32(svint16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x2)))
+svfloat32x2_t svreinterpret_f32(svuint16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x2)))
+svfloat32x2_t svreinterpret_f32(svint32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x2)))
+svfloat32x2_t svreinterpret_f32(svuint32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x2)))
+svfloat32x2_t svreinterpret_f32(svint64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x2)))
+svfloat32x2_t svreinterpret_f32(svuint64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x2)))
+svfloat32x2_t svreinterpret_f32(svfloat16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x2)))
+svfloat32x2_t svreinterpret_f32(svbfloat16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x2)))
+svfloat32x2_t svreinterpret_f32(svfloat32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x2)))
+svfloat32x2_t svreinterpret_f32(svfloat64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x2)))
+svfloat64x2_t svreinterpret_f64(svint8x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x2)))
+svfloat64x2_t svreinterpret_f64(svuint8x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x2)))
+svfloat64x2_t svreinterpret_f64(svint16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x2)))
+svfloat64x2_t svreinterpret_f64(svuint16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x2)))
+svfloat64x2_t svreinterpret_f64(svint32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x2)))
+svfloat64x2_t svreinterpret_f64(svuint32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x2)))
+svfloat64x2_t svreinterpret_f64(svint64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x2)))
+svfloat64x2_t svreinterpret_f64(svuint64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x2)))
+svfloat64x2_t svreinterpret_f64(svfloat16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x2)))
+svfloat64x2_t svreinterpret_f64(svbfloat16x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x2)))
+svfloat64x2_t svreinterpret_f64(svfloat32x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x2)))
+svfloat64x2_t svreinterpret_f64(svfloat64x2_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x3)))
+svint8x3_t svreinterpret_s8_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x3)))
+svint8x3_t svreinterpret_s8_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x3)))
+svint8x3_t svreinterpret_s8_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x3)))
+svint8x3_t svreinterpret_s8_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x3)))
+svint8x3_t svreinterpret_s8_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x3)))
+svint8x3_t svreinterpret_s8_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x3)))
+svint8x3_t svreinterpret_s8_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x3)))
+svint8x3_t svreinterpret_s8_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x3)))
+svint8x3_t svreinterpret_s8_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x3)))
+svint8x3_t svreinterpret_s8_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x3)))
+svint8x3_t svreinterpret_s8_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x3)))
+svint8x3_t svreinterpret_s8_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x3)))
+svuint8x3_t svreinterpret_u8_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x3)))
+svuint8x3_t svreinterpret_u8_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x3)))
+svuint8x3_t svreinterpret_u8_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x3)))
+svuint8x3_t svreinterpret_u8_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x3)))
+svuint8x3_t svreinterpret_u8_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x3)))
+svuint8x3_t svreinterpret_u8_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x3)))
+svuint8x3_t svreinterpret_u8_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x3)))
+svuint8x3_t svreinterpret_u8_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x3)))
+svuint8x3_t svreinterpret_u8_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x3)))
+svuint8x3_t svreinterpret_u8_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x3)))
+svuint8x3_t svreinterpret_u8_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x3)))
+svuint8x3_t svreinterpret_u8_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x3)))
+svint16x3_t svreinterpret_s16_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x3)))
+svint16x3_t svreinterpret_s16_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x3)))
+svint16x3_t svreinterpret_s16_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x3)))
+svint16x3_t svreinterpret_s16_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x3)))
+svint16x3_t svreinterpret_s16_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x3)))
+svint16x3_t svreinterpret_s16_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x3)))
+svint16x3_t svreinterpret_s16_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x3)))
+svint16x3_t svreinterpret_s16_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x3)))
+svint16x3_t svreinterpret_s16_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x3)))
+svint16x3_t svreinterpret_s16_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x3)))
+svint16x3_t svreinterpret_s16_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x3)))
+svint16x3_t svreinterpret_s16_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x3)))
+svuint16x3_t svreinterpret_u16_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x3)))
+svuint16x3_t svreinterpret_u16_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x3)))
+svuint16x3_t svreinterpret_u16_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x3)))
+svuint16x3_t svreinterpret_u16_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x3)))
+svuint16x3_t svreinterpret_u16_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x3)))
+svuint16x3_t svreinterpret_u16_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x3)))
+svuint16x3_t svreinterpret_u16_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x3)))
+svuint16x3_t svreinterpret_u16_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x3)))
+svuint16x3_t svreinterpret_u16_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x3)))
+svuint16x3_t svreinterpret_u16_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x3)))
+svuint16x3_t svreinterpret_u16_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x3)))
+svuint16x3_t svreinterpret_u16_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x3)))
+svint32x3_t svreinterpret_s32_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x3)))
+svint32x3_t svreinterpret_s32_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x3)))
+svint32x3_t svreinterpret_s32_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x3)))
+svint32x3_t svreinterpret_s32_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x3)))
+svint32x3_t svreinterpret_s32_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x3)))
+svint32x3_t svreinterpret_s32_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x3)))
+svint32x3_t svreinterpret_s32_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x3)))
+svint32x3_t svreinterpret_s32_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x3)))
+svint32x3_t svreinterpret_s32_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x3)))
+svint32x3_t svreinterpret_s32_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x3)))
+svint32x3_t svreinterpret_s32_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x3)))
+svint32x3_t svreinterpret_s32_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x3)))
+svuint32x3_t svreinterpret_u32_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x3)))
+svuint32x3_t svreinterpret_u32_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x3)))
+svuint32x3_t svreinterpret_u32_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x3)))
+svuint32x3_t svreinterpret_u32_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x3)))
+svuint32x3_t svreinterpret_u32_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x3)))
+svuint32x3_t svreinterpret_u32_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x3)))
+svuint32x3_t svreinterpret_u32_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x3)))
+svuint32x3_t svreinterpret_u32_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x3)))
+svuint32x3_t svreinterpret_u32_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x3)))
+svuint32x3_t svreinterpret_u32_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x3)))
+svuint32x3_t svreinterpret_u32_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x3)))
+svuint32x3_t svreinterpret_u32_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x3)))
+svint64x3_t svreinterpret_s64_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x3)))
+svint64x3_t svreinterpret_s64_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x3)))
+svint64x3_t svreinterpret_s64_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x3)))
+svint64x3_t svreinterpret_s64_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x3)))
+svint64x3_t svreinterpret_s64_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x3)))
+svint64x3_t svreinterpret_s64_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x3)))
+svint64x3_t svreinterpret_s64_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x3)))
+svint64x3_t svreinterpret_s64_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x3)))
+svint64x3_t svreinterpret_s64_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x3)))
+svint64x3_t svreinterpret_s64_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x3)))
+svint64x3_t svreinterpret_s64_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x3)))
+svint64x3_t svreinterpret_s64_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x3)))
+svuint64x3_t svreinterpret_u64_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x3)))
+svuint64x3_t svreinterpret_u64_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x3)))
+svuint64x3_t svreinterpret_u64_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x3)))
+svuint64x3_t svreinterpret_u64_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x3)))
+svuint64x3_t svreinterpret_u64_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x3)))
+svuint64x3_t svreinterpret_u64_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x3)))
+svuint64x3_t svreinterpret_u64_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x3)))
+svuint64x3_t svreinterpret_u64_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x3)))
+svuint64x3_t svreinterpret_u64_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x3)))
+svuint64x3_t svreinterpret_u64_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x3)))
+svuint64x3_t svreinterpret_u64_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x3)))
+svuint64x3_t svreinterpret_u64_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x3)))
+svfloat16x3_t svreinterpret_f16_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x3)))
+svfloat16x3_t svreinterpret_f16_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x3)))
+svfloat16x3_t svreinterpret_f16_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x3)))
+svfloat16x3_t svreinterpret_f16_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x3)))
+svfloat16x3_t svreinterpret_f16_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x3)))
+svfloat16x3_t svreinterpret_f16_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x3)))
+svfloat16x3_t svreinterpret_f16_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x3)))
+svfloat16x3_t svreinterpret_f16_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x3)))
+svfloat16x3_t svreinterpret_f16_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x3)))
+svfloat16x3_t svreinterpret_f16_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x3)))
+svfloat16x3_t svreinterpret_f16_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x3)))
+svfloat16x3_t svreinterpret_f16_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x3)))
+svbfloat16x3_t svreinterpret_bf16_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x3)))
+svbfloat16x3_t svreinterpret_bf16_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x3)))
+svbfloat16x3_t svreinterpret_bf16_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x3)))
+svbfloat16x3_t svreinterpret_bf16_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x3)))
+svbfloat16x3_t svreinterpret_bf16_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x3)))
+svbfloat16x3_t svreinterpret_bf16_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x3)))
+svbfloat16x3_t svreinterpret_bf16_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x3)))
+svbfloat16x3_t svreinterpret_bf16_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x3)))
+svbfloat16x3_t svreinterpret_bf16_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x3)))
+svbfloat16x3_t svreinterpret_bf16_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x3)))
+svbfloat16x3_t svreinterpret_bf16_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x3)))
+svbfloat16x3_t svreinterpret_bf16_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x3)))
+svfloat32x3_t svreinterpret_f32_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x3)))
+svfloat32x3_t svreinterpret_f32_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x3)))
+svfloat32x3_t svreinterpret_f32_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x3)))
+svfloat32x3_t svreinterpret_f32_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x3)))
+svfloat32x3_t svreinterpret_f32_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x3)))
+svfloat32x3_t svreinterpret_f32_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x3)))
+svfloat32x3_t svreinterpret_f32_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x3)))
+svfloat32x3_t svreinterpret_f32_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x3)))
+svfloat32x3_t svreinterpret_f32_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x3)))
+svfloat32x3_t svreinterpret_f32_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x3)))
+svfloat32x3_t svreinterpret_f32_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x3)))
+svfloat32x3_t svreinterpret_f32_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x3)))
+svfloat64x3_t svreinterpret_f64_s8_x3(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x3)))
+svfloat64x3_t svreinterpret_f64_u8_x3(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x3)))
+svfloat64x3_t svreinterpret_f64_s16_x3(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x3)))
+svfloat64x3_t svreinterpret_f64_u16_x3(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x3)))
+svfloat64x3_t svreinterpret_f64_s32_x3(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x3)))
+svfloat64x3_t svreinterpret_f64_u32_x3(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x3)))
+svfloat64x3_t svreinterpret_f64_s64_x3(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x3)))
+svfloat64x3_t svreinterpret_f64_u64_x3(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x3)))
+svfloat64x3_t svreinterpret_f64_f16_x3(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x3)))
+svfloat64x3_t svreinterpret_f64_bf16_x3(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x3)))
+svfloat64x3_t svreinterpret_f64_f32_x3(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x3)))
+svfloat64x3_t svreinterpret_f64_f64_x3(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x3)))
+svint8x3_t svreinterpret_s8(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x3)))
+svint8x3_t svreinterpret_s8(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x3)))
+svint8x3_t svreinterpret_s8(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x3)))
+svint8x3_t svreinterpret_s8(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x3)))
+svint8x3_t svreinterpret_s8(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x3)))
+svint8x3_t svreinterpret_s8(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x3)))
+svint8x3_t svreinterpret_s8(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x3)))
+svint8x3_t svreinterpret_s8(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x3)))
+svint8x3_t svreinterpret_s8(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x3)))
+svint8x3_t svreinterpret_s8(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x3)))
+svint8x3_t svreinterpret_s8(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x3)))
+svint8x3_t svreinterpret_s8(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x3)))
+svuint8x3_t svreinterpret_u8(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x3)))
+svuint8x3_t svreinterpret_u8(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x3)))
+svuint8x3_t svreinterpret_u8(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x3)))
+svuint8x3_t svreinterpret_u8(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x3)))
+svuint8x3_t svreinterpret_u8(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x3)))
+svuint8x3_t svreinterpret_u8(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x3)))
+svuint8x3_t svreinterpret_u8(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x3)))
+svuint8x3_t svreinterpret_u8(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x3)))
+svuint8x3_t svreinterpret_u8(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x3)))
+svuint8x3_t svreinterpret_u8(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x3)))
+svuint8x3_t svreinterpret_u8(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x3)))
+svuint8x3_t svreinterpret_u8(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x3)))
+svint16x3_t svreinterpret_s16(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x3)))
+svint16x3_t svreinterpret_s16(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x3)))
+svint16x3_t svreinterpret_s16(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x3)))
+svint16x3_t svreinterpret_s16(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x3)))
+svint16x3_t svreinterpret_s16(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x3)))
+svint16x3_t svreinterpret_s16(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x3)))
+svint16x3_t svreinterpret_s16(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x3)))
+svint16x3_t svreinterpret_s16(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x3)))
+svint16x3_t svreinterpret_s16(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x3)))
+svint16x3_t svreinterpret_s16(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x3)))
+svint16x3_t svreinterpret_s16(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x3)))
+svint16x3_t svreinterpret_s16(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x3)))
+svuint16x3_t svreinterpret_u16(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x3)))
+svuint16x3_t svreinterpret_u16(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x3)))
+svuint16x3_t svreinterpret_u16(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x3)))
+svuint16x3_t svreinterpret_u16(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x3)))
+svuint16x3_t svreinterpret_u16(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x3)))
+svuint16x3_t svreinterpret_u16(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x3)))
+svuint16x3_t svreinterpret_u16(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x3)))
+svuint16x3_t svreinterpret_u16(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x3)))
+svuint16x3_t svreinterpret_u16(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x3)))
+svuint16x3_t svreinterpret_u16(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x3)))
+svuint16x3_t svreinterpret_u16(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x3)))
+svuint16x3_t svreinterpret_u16(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x3)))
+svint32x3_t svreinterpret_s32(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x3)))
+svint32x3_t svreinterpret_s32(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x3)))
+svint32x3_t svreinterpret_s32(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x3)))
+svint32x3_t svreinterpret_s32(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x3)))
+svint32x3_t svreinterpret_s32(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x3)))
+svint32x3_t svreinterpret_s32(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x3)))
+svint32x3_t svreinterpret_s32(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x3)))
+svint32x3_t svreinterpret_s32(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x3)))
+svint32x3_t svreinterpret_s32(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x3)))
+svint32x3_t svreinterpret_s32(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x3)))
+svint32x3_t svreinterpret_s32(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x3)))
+svint32x3_t svreinterpret_s32(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x3)))
+svuint32x3_t svreinterpret_u32(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x3)))
+svuint32x3_t svreinterpret_u32(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x3)))
+svuint32x3_t svreinterpret_u32(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x3)))
+svuint32x3_t svreinterpret_u32(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x3)))
+svuint32x3_t svreinterpret_u32(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x3)))
+svuint32x3_t svreinterpret_u32(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x3)))
+svuint32x3_t svreinterpret_u32(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x3)))
+svuint32x3_t svreinterpret_u32(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x3)))
+svuint32x3_t svreinterpret_u32(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x3)))
+svuint32x3_t svreinterpret_u32(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x3)))
+svuint32x3_t svreinterpret_u32(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x3)))
+svuint32x3_t svreinterpret_u32(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x3)))
+svint64x3_t svreinterpret_s64(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x3)))
+svint64x3_t svreinterpret_s64(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x3)))
+svint64x3_t svreinterpret_s64(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x3)))
+svint64x3_t svreinterpret_s64(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x3)))
+svint64x3_t svreinterpret_s64(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x3)))
+svint64x3_t svreinterpret_s64(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x3)))
+svint64x3_t svreinterpret_s64(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x3)))
+svint64x3_t svreinterpret_s64(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x3)))
+svint64x3_t svreinterpret_s64(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x3)))
+svint64x3_t svreinterpret_s64(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x3)))
+svint64x3_t svreinterpret_s64(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x3)))
+svint64x3_t svreinterpret_s64(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x3)))
+svuint64x3_t svreinterpret_u64(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x3)))
+svuint64x3_t svreinterpret_u64(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x3)))
+svuint64x3_t svreinterpret_u64(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x3)))
+svuint64x3_t svreinterpret_u64(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x3)))
+svuint64x3_t svreinterpret_u64(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x3)))
+svuint64x3_t svreinterpret_u64(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x3)))
+svuint64x3_t svreinterpret_u64(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x3)))
+svuint64x3_t svreinterpret_u64(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x3)))
+svuint64x3_t svreinterpret_u64(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x3)))
+svuint64x3_t svreinterpret_u64(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x3)))
+svuint64x3_t svreinterpret_u64(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x3)))
+svuint64x3_t svreinterpret_u64(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x3)))
+svfloat16x3_t svreinterpret_f16(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x3)))
+svfloat16x3_t svreinterpret_f16(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x3)))
+svfloat16x3_t svreinterpret_f16(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x3)))
+svfloat16x3_t svreinterpret_f16(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x3)))
+svfloat16x3_t svreinterpret_f16(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x3)))
+svfloat16x3_t svreinterpret_f16(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x3)))
+svfloat16x3_t svreinterpret_f16(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x3)))
+svfloat16x3_t svreinterpret_f16(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x3)))
+svfloat16x3_t svreinterpret_f16(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x3)))
+svfloat16x3_t svreinterpret_f16(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x3)))
+svfloat16x3_t svreinterpret_f16(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x3)))
+svfloat16x3_t svreinterpret_f16(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x3)))
+svbfloat16x3_t svreinterpret_bf16(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x3)))
+svbfloat16x3_t svreinterpret_bf16(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x3)))
+svbfloat16x3_t svreinterpret_bf16(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x3)))
+svbfloat16x3_t svreinterpret_bf16(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x3)))
+svbfloat16x3_t svreinterpret_bf16(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x3)))
+svbfloat16x3_t svreinterpret_bf16(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x3)))
+svbfloat16x3_t svreinterpret_bf16(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x3)))
+svbfloat16x3_t svreinterpret_bf16(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x3)))
+svbfloat16x3_t svreinterpret_bf16(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x3)))
+svbfloat16x3_t svreinterpret_bf16(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x3)))
+svbfloat16x3_t svreinterpret_bf16(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x3)))
+svbfloat16x3_t svreinterpret_bf16(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x3)))
+svfloat32x3_t svreinterpret_f32(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x3)))
+svfloat32x3_t svreinterpret_f32(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x3)))
+svfloat32x3_t svreinterpret_f32(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x3)))
+svfloat32x3_t svreinterpret_f32(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x3)))
+svfloat32x3_t svreinterpret_f32(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x3)))
+svfloat32x3_t svreinterpret_f32(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x3)))
+svfloat32x3_t svreinterpret_f32(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x3)))
+svfloat32x3_t svreinterpret_f32(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x3)))
+svfloat32x3_t svreinterpret_f32(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x3)))
+svfloat32x3_t svreinterpret_f32(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x3)))
+svfloat32x3_t svreinterpret_f32(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x3)))
+svfloat32x3_t svreinterpret_f32(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x3)))
+svfloat64x3_t svreinterpret_f64(svint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x3)))
+svfloat64x3_t svreinterpret_f64(svuint8x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x3)))
+svfloat64x3_t svreinterpret_f64(svint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x3)))
+svfloat64x3_t svreinterpret_f64(svuint16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x3)))
+svfloat64x3_t svreinterpret_f64(svint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x3)))
+svfloat64x3_t svreinterpret_f64(svuint32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x3)))
+svfloat64x3_t svreinterpret_f64(svint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x3)))
+svfloat64x3_t svreinterpret_f64(svuint64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x3)))
+svfloat64x3_t svreinterpret_f64(svfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x3)))
+svfloat64x3_t svreinterpret_f64(svbfloat16x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x3)))
+svfloat64x3_t svreinterpret_f64(svfloat32x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x3)))
+svfloat64x3_t svreinterpret_f64(svfloat64x3_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x4)))
+svint8x4_t svreinterpret_s8_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x4)))
+svint8x4_t svreinterpret_s8_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x4)))
+svint8x4_t svreinterpret_s8_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x4)))
+svint8x4_t svreinterpret_s8_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x4)))
+svint8x4_t svreinterpret_s8_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x4)))
+svint8x4_t svreinterpret_s8_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x4)))
+svint8x4_t svreinterpret_s8_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x4)))
+svint8x4_t svreinterpret_s8_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x4)))
+svint8x4_t svreinterpret_s8_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x4)))
+svint8x4_t svreinterpret_s8_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x4)))
+svint8x4_t svreinterpret_s8_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x4)))
+svint8x4_t svreinterpret_s8_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x4)))
+svuint8x4_t svreinterpret_u8_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x4)))
+svuint8x4_t svreinterpret_u8_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x4)))
+svuint8x4_t svreinterpret_u8_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x4)))
+svuint8x4_t svreinterpret_u8_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x4)))
+svuint8x4_t svreinterpret_u8_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x4)))
+svuint8x4_t svreinterpret_u8_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x4)))
+svuint8x4_t svreinterpret_u8_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x4)))
+svuint8x4_t svreinterpret_u8_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x4)))
+svuint8x4_t svreinterpret_u8_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x4)))
+svuint8x4_t svreinterpret_u8_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x4)))
+svuint8x4_t svreinterpret_u8_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x4)))
+svuint8x4_t svreinterpret_u8_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x4)))
+svint16x4_t svreinterpret_s16_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x4)))
+svint16x4_t svreinterpret_s16_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x4)))
+svint16x4_t svreinterpret_s16_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x4)))
+svint16x4_t svreinterpret_s16_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x4)))
+svint16x4_t svreinterpret_s16_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x4)))
+svint16x4_t svreinterpret_s16_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x4)))
+svint16x4_t svreinterpret_s16_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x4)))
+svint16x4_t svreinterpret_s16_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x4)))
+svint16x4_t svreinterpret_s16_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x4)))
+svint16x4_t svreinterpret_s16_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x4)))
+svint16x4_t svreinterpret_s16_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x4)))
+svint16x4_t svreinterpret_s16_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x4)))
+svuint16x4_t svreinterpret_u16_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x4)))
+svuint16x4_t svreinterpret_u16_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x4)))
+svuint16x4_t svreinterpret_u16_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x4)))
+svuint16x4_t svreinterpret_u16_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x4)))
+svuint16x4_t svreinterpret_u16_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x4)))
+svuint16x4_t svreinterpret_u16_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x4)))
+svuint16x4_t svreinterpret_u16_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x4)))
+svuint16x4_t svreinterpret_u16_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x4)))
+svuint16x4_t svreinterpret_u16_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x4)))
+svuint16x4_t svreinterpret_u16_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x4)))
+svuint16x4_t svreinterpret_u16_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x4)))
+svuint16x4_t svreinterpret_u16_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x4)))
+svint32x4_t svreinterpret_s32_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x4)))
+svint32x4_t svreinterpret_s32_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x4)))
+svint32x4_t svreinterpret_s32_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x4)))
+svint32x4_t svreinterpret_s32_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x4)))
+svint32x4_t svreinterpret_s32_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x4)))
+svint32x4_t svreinterpret_s32_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x4)))
+svint32x4_t svreinterpret_s32_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x4)))
+svint32x4_t svreinterpret_s32_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x4)))
+svint32x4_t svreinterpret_s32_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x4)))
+svint32x4_t svreinterpret_s32_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x4)))
+svint32x4_t svreinterpret_s32_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x4)))
+svint32x4_t svreinterpret_s32_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x4)))
+svuint32x4_t svreinterpret_u32_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x4)))
+svuint32x4_t svreinterpret_u32_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x4)))
+svuint32x4_t svreinterpret_u32_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x4)))
+svuint32x4_t svreinterpret_u32_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x4)))
+svuint32x4_t svreinterpret_u32_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x4)))
+svuint32x4_t svreinterpret_u32_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x4)))
+svuint32x4_t svreinterpret_u32_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x4)))
+svuint32x4_t svreinterpret_u32_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x4)))
+svuint32x4_t svreinterpret_u32_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x4)))
+svuint32x4_t svreinterpret_u32_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x4)))
+svuint32x4_t svreinterpret_u32_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x4)))
+svuint32x4_t svreinterpret_u32_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x4)))
+svint64x4_t svreinterpret_s64_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x4)))
+svint64x4_t svreinterpret_s64_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x4)))
+svint64x4_t svreinterpret_s64_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x4)))
+svint64x4_t svreinterpret_s64_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x4)))
+svint64x4_t svreinterpret_s64_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x4)))
+svint64x4_t svreinterpret_s64_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x4)))
+svint64x4_t svreinterpret_s64_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x4)))
+svint64x4_t svreinterpret_s64_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x4)))
+svint64x4_t svreinterpret_s64_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x4)))
+svint64x4_t svreinterpret_s64_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x4)))
+svint64x4_t svreinterpret_s64_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x4)))
+svint64x4_t svreinterpret_s64_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x4)))
+svuint64x4_t svreinterpret_u64_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x4)))
+svuint64x4_t svreinterpret_u64_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x4)))
+svuint64x4_t svreinterpret_u64_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x4)))
+svuint64x4_t svreinterpret_u64_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x4)))
+svuint64x4_t svreinterpret_u64_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x4)))
+svuint64x4_t svreinterpret_u64_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x4)))
+svuint64x4_t svreinterpret_u64_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x4)))
+svuint64x4_t svreinterpret_u64_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x4)))
+svuint64x4_t svreinterpret_u64_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x4)))
+svuint64x4_t svreinterpret_u64_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x4)))
+svuint64x4_t svreinterpret_u64_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x4)))
+svuint64x4_t svreinterpret_u64_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x4)))
+svfloat16x4_t svreinterpret_f16_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x4)))
+svfloat16x4_t svreinterpret_f16_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x4)))
+svfloat16x4_t svreinterpret_f16_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x4)))
+svfloat16x4_t svreinterpret_f16_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x4)))
+svfloat16x4_t svreinterpret_f16_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x4)))
+svfloat16x4_t svreinterpret_f16_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x4)))
+svfloat16x4_t svreinterpret_f16_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x4)))
+svfloat16x4_t svreinterpret_f16_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x4)))
+svfloat16x4_t svreinterpret_f16_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x4)))
+svfloat16x4_t svreinterpret_f16_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x4)))
+svfloat16x4_t svreinterpret_f16_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x4)))
+svfloat16x4_t svreinterpret_f16_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x4)))
+svbfloat16x4_t svreinterpret_bf16_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x4)))
+svbfloat16x4_t svreinterpret_bf16_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x4)))
+svbfloat16x4_t svreinterpret_bf16_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x4)))
+svbfloat16x4_t svreinterpret_bf16_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x4)))
+svbfloat16x4_t svreinterpret_bf16_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x4)))
+svbfloat16x4_t svreinterpret_bf16_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x4)))
+svbfloat16x4_t svreinterpret_bf16_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x4)))
+svbfloat16x4_t svreinterpret_bf16_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x4)))
+svbfloat16x4_t svreinterpret_bf16_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x4)))
+svbfloat16x4_t svreinterpret_bf16_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x4)))
+svbfloat16x4_t svreinterpret_bf16_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x4)))
+svbfloat16x4_t svreinterpret_bf16_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x4)))
+svfloat32x4_t svreinterpret_f32_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x4)))
+svfloat32x4_t svreinterpret_f32_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x4)))
+svfloat32x4_t svreinterpret_f32_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x4)))
+svfloat32x4_t svreinterpret_f32_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x4)))
+svfloat32x4_t svreinterpret_f32_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x4)))
+svfloat32x4_t svreinterpret_f32_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x4)))
+svfloat32x4_t svreinterpret_f32_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x4)))
+svfloat32x4_t svreinterpret_f32_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x4)))
+svfloat32x4_t svreinterpret_f32_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x4)))
+svfloat32x4_t svreinterpret_f32_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x4)))
+svfloat32x4_t svreinterpret_f32_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x4)))
+svfloat32x4_t svreinterpret_f32_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x4)))
+svfloat64x4_t svreinterpret_f64_s8_x4(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x4)))
+svfloat64x4_t svreinterpret_f64_u8_x4(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x4)))
+svfloat64x4_t svreinterpret_f64_s16_x4(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x4)))
+svfloat64x4_t svreinterpret_f64_u16_x4(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x4)))
+svfloat64x4_t svreinterpret_f64_s32_x4(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x4)))
+svfloat64x4_t svreinterpret_f64_u32_x4(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x4)))
+svfloat64x4_t svreinterpret_f64_s64_x4(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x4)))
+svfloat64x4_t svreinterpret_f64_u64_x4(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x4)))
+svfloat64x4_t svreinterpret_f64_f16_x4(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x4)))
+svfloat64x4_t svreinterpret_f64_bf16_x4(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x4)))
+svfloat64x4_t svreinterpret_f64_f32_x4(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x4)))
+svfloat64x4_t svreinterpret_f64_f64_x4(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x4)))
+svint8x4_t svreinterpret_s8(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x4)))
+svint8x4_t svreinterpret_s8(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x4)))
+svint8x4_t svreinterpret_s8(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x4)))
+svint8x4_t svreinterpret_s8(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x4)))
+svint8x4_t svreinterpret_s8(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x4)))
+svint8x4_t svreinterpret_s8(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x4)))
+svint8x4_t svreinterpret_s8(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x4)))
+svint8x4_t svreinterpret_s8(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x4)))
+svint8x4_t svreinterpret_s8(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x4)))
+svint8x4_t svreinterpret_s8(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x4)))
+svint8x4_t svreinterpret_s8(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x4)))
+svint8x4_t svreinterpret_s8(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x4)))
+svuint8x4_t svreinterpret_u8(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x4)))
+svuint8x4_t svreinterpret_u8(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x4)))
+svuint8x4_t svreinterpret_u8(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x4)))
+svuint8x4_t svreinterpret_u8(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x4)))
+svuint8x4_t svreinterpret_u8(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x4)))
+svuint8x4_t svreinterpret_u8(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x4)))
+svuint8x4_t svreinterpret_u8(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x4)))
+svuint8x4_t svreinterpret_u8(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x4)))
+svuint8x4_t svreinterpret_u8(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x4)))
+svuint8x4_t svreinterpret_u8(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x4)))
+svuint8x4_t svreinterpret_u8(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x4)))
+svuint8x4_t svreinterpret_u8(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x4)))
+svint16x4_t svreinterpret_s16(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x4)))
+svint16x4_t svreinterpret_s16(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x4)))
+svint16x4_t svreinterpret_s16(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x4)))
+svint16x4_t svreinterpret_s16(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x4)))
+svint16x4_t svreinterpret_s16(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x4)))
+svint16x4_t svreinterpret_s16(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x4)))
+svint16x4_t svreinterpret_s16(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x4)))
+svint16x4_t svreinterpret_s16(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x4)))
+svint16x4_t svreinterpret_s16(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x4)))
+svint16x4_t svreinterpret_s16(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x4)))
+svint16x4_t svreinterpret_s16(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x4)))
+svint16x4_t svreinterpret_s16(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x4)))
+svuint16x4_t svreinterpret_u16(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x4)))
+svuint16x4_t svreinterpret_u16(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x4)))
+svuint16x4_t svreinterpret_u16(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x4)))
+svuint16x4_t svreinterpret_u16(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x4)))
+svuint16x4_t svreinterpret_u16(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x4)))
+svuint16x4_t svreinterpret_u16(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x4)))
+svuint16x4_t svreinterpret_u16(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x4)))
+svuint16x4_t svreinterpret_u16(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x4)))
+svuint16x4_t svreinterpret_u16(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x4)))
+svuint16x4_t svreinterpret_u16(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x4)))
+svuint16x4_t svreinterpret_u16(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x4)))
+svuint16x4_t svreinterpret_u16(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x4)))
+svint32x4_t svreinterpret_s32(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x4)))
+svint32x4_t svreinterpret_s32(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x4)))
+svint32x4_t svreinterpret_s32(svint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x4)))
+svint32x4_t svreinterpret_s32(svuint16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x4)))
+svint32x4_t svreinterpret_s32(svint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x4)))
+svint32x4_t svreinterpret_s32(svuint32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x4)))
+svint32x4_t svreinterpret_s32(svint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x4)))
+svint32x4_t svreinterpret_s32(svuint64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x4)))
+svint32x4_t svreinterpret_s32(svfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x4)))
+svint32x4_t svreinterpret_s32(svbfloat16x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x4)))
+svint32x4_t svreinterpret_s32(svfloat32x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x4)))
+svint32x4_t svreinterpret_s32(svfloat64x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x4)))
+svuint32x4_t svreinterpret_u32(svint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x4)))
+svuint32x4_t svreinterpret_u32(svuint8x4_t op);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x4)))
+svuint32x4_t
svreinterpret_u32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x4))) +svuint32x4_t svreinterpret_u32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x4))) +svuint32x4_t svreinterpret_u32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x4))) +svuint32x4_t svreinterpret_u32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x4))) +svuint32x4_t svreinterpret_u32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x4))) +svuint32x4_t svreinterpret_u32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x4))) +svuint32x4_t svreinterpret_u32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x4))) +svuint32x4_t svreinterpret_u32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x4))) +svuint32x4_t svreinterpret_u32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x4))) +svuint32x4_t svreinterpret_u32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x4))) +svint64x4_t svreinterpret_s64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x4))) +svint64x4_t svreinterpret_s64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x4))) +svint64x4_t svreinterpret_s64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x4))) +svint64x4_t svreinterpret_s64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x4))) +svint64x4_t svreinterpret_s64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x4))) +svint64x4_t svreinterpret_s64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x4))) +svint64x4_t svreinterpret_s64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x4))) +svint64x4_t svreinterpret_s64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x4))) +svint64x4_t svreinterpret_s64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x4))) +svint64x4_t svreinterpret_s64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x4))) +svint64x4_t svreinterpret_s64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x4))) +svint64x4_t svreinterpret_s64(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x4))) +svuint64x4_t svreinterpret_u64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x4))) +svuint64x4_t svreinterpret_u64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x4))) +svuint64x4_t svreinterpret_u64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x4))) +svuint64x4_t svreinterpret_u64(svuint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x4))) +svuint64x4_t svreinterpret_u64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x4))) +svuint64x4_t svreinterpret_u64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x4))) +svuint64x4_t svreinterpret_u64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x4))) +svuint64x4_t svreinterpret_u64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x4))) +svuint64x4_t svreinterpret_u64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x4))) +svuint64x4_t svreinterpret_u64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x4))) +svuint64x4_t svreinterpret_u64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x4))) +svuint64x4_t svreinterpret_u64(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x4))) +svfloat16x4_t svreinterpret_f16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x4))) +svfloat16x4_t svreinterpret_f16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x4))) +svfloat16x4_t svreinterpret_f16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x4))) +svfloat16x4_t svreinterpret_f16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x4))) +svfloat16x4_t svreinterpret_f16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x4))) +svfloat16x4_t svreinterpret_f16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x4))) +svfloat16x4_t svreinterpret_f16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x4))) +svfloat16x4_t svreinterpret_f16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x4))) +svfloat16x4_t svreinterpret_f16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x4))) +svfloat16x4_t svreinterpret_f16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x4))) +svfloat16x4_t svreinterpret_f16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x4))) +svfloat16x4_t svreinterpret_f16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x4))) +svbfloat16x4_t svreinterpret_bf16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x4))) +svbfloat16x4_t svreinterpret_bf16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x4))) +svbfloat16x4_t svreinterpret_bf16(svint32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x4))) +svbfloat16x4_t svreinterpret_bf16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x4))) +svbfloat16x4_t svreinterpret_bf16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x4))) +svfloat32x4_t svreinterpret_f32(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x4))) +svfloat32x4_t svreinterpret_f32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x4))) +svfloat32x4_t svreinterpret_f32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x4))) +svfloat32x4_t svreinterpret_f32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x4))) +svfloat32x4_t svreinterpret_f32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x4))) +svfloat32x4_t svreinterpret_f32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x4))) +svfloat32x4_t svreinterpret_f32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x4))) +svfloat32x4_t svreinterpret_f32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x4))) +svfloat32x4_t svreinterpret_f32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x4))) +svfloat32x4_t svreinterpret_f32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x4))) +svfloat32x4_t svreinterpret_f32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x4))) +svfloat32x4_t svreinterpret_f32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x4))) +svfloat64x4_t svreinterpret_f64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x4))) +svfloat64x4_t svreinterpret_f64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x4))) +svfloat64x4_t svreinterpret_f64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x4))) +svfloat64x4_t svreinterpret_f64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x4))) +svfloat64x4_t svreinterpret_f64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x4))) +svfloat64x4_t svreinterpret_f64(svuint32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x4))) +svfloat64x4_t svreinterpret_f64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x4))) +svfloat64x4_t svreinterpret_f64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x4))) +svfloat64x4_t svreinterpret_f64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x4))) +svfloat64x4_t svreinterpret_f64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x4))) +svfloat64x4_t svreinterpret_f64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x4))) +svfloat64x4_t svreinterpret_f64(svfloat64x4_t op); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x2))) +svfloat32x2_t svcvt_f32_f16_x2(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtl_f32_f16_x2))) +svfloat32x2_t svcvtl_f32_f16_x2(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x2))) +svfloat32x2_t svcvt_f32(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtl_f32_f16_x2))) +svfloat32x2_t svcvtl_f32(svfloat16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) svuint8x2_t svadd_single_u8_x2(svuint8x2_t, svuint8_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) @@ -3652,10 +2852,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x svuint8_t svqrshrun_n_u8_s32_x4(svint32x4_t, uint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) svuint16_t svqrshrun_n_u16_s64_x4(svint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) -svbool_t svreinterpret_b(svcount_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) -svcount_t svreinterpret_c(svbool_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) svfloat32x2_t svrinta_f32_x2(svfloat32x2_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) @@ -4416,10 +3612,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_ svuint8_t svqrshrun_u8(svint32x4_t, uint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) svuint16_t svqrshrun_u16(svint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) -svbool_t svreinterpret(svcount_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) -svcount_t svreinterpret(svbool_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) svfloat32x2_t svrinta(svfloat32x2_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) @@ -4764,18 +3956,14120 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) svint64x4_t svzipq(svint64x4_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) svint16x4_t svzipq(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x2))) +svbfloat16x2_t svclamp_single_bf16_x2(svbfloat16x2_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x4))) +svbfloat16x4_t 
svclamp_single_bf16_x4(svbfloat16x4_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x2))) +svbfloat16x2_t svmax_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x4))) +svbfloat16x4_t svmax_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x2))) +svbfloat16x2_t svmax_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x4))) +svbfloat16x4_t svmax_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x2))) +svbfloat16x2_t svmaxnm_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x4))) +svbfloat16x4_t svmaxnm_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x2))) +svbfloat16x2_t svmaxnm_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x4))) +svbfloat16x4_t svmaxnm_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x2))) +svbfloat16x2_t svmin_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x4))) +svbfloat16x4_t svmin_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x2))) +svbfloat16x2_t svmin_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x4))) +svbfloat16x4_t svmin_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x2))) +svbfloat16x2_t svminnm_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x4))) +svbfloat16x4_t svminnm_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x2))) +svbfloat16x2_t svminnm_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x4))) +svbfloat16x4_t svminnm_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x2))) +svbfloat16x2_t svclamp(svbfloat16x2_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x4))) +svbfloat16x4_t svclamp(svbfloat16x4_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x2))) +svbfloat16x2_t svmax(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x4))) +svbfloat16x4_t svmax(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x2))) +svbfloat16x2_t svmax(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x4))) +svbfloat16x4_t svmax(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x2))) +svbfloat16x2_t svmaxnm(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x4))) 
+svbfloat16x4_t svmaxnm(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x2))) +svbfloat16x2_t svmaxnm(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x4))) +svbfloat16x4_t svmaxnm(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x2))) +svbfloat16x2_t svmin(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x4))) +svbfloat16x4_t svmin(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x2))) +svbfloat16x2_t svmin(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x4))) +svbfloat16x4_t svmin(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x2))) +svbfloat16x2_t svminnm(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x4))) +svbfloat16x4_t svminnm(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x2))) +svbfloat16x2_t svminnm(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x4))) +svbfloat16x4_t svminnm(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) +svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) +svuint32_t svcompact_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) +svuint64_t svcompact_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) +svfloat64_t svcompact_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) +svfloat32_t svcompact_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) +svint32_t svcompact_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) +svint64_t svcompact_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t svexpa_f64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa_f32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa_f16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) +svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t 
svld1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) +svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) +svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) +svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) +svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) +svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) +svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) +svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) +svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) +svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) +svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) +svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) +svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) +svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) +svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) +svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) +svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) +svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) +svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) +svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) +svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) +svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) +svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) +svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) +svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) +svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) +svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) +svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) +svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) +svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) +svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) +svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) +svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) +svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) +svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) +svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) +svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) +svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) +svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) +svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) +svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) +svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) +svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) +svuint64_t svld1sw_gather_s64offset_u64(svbool_t, 
int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) +svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) +svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) +svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) +svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) +svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) +svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) +svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) +svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) +svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) +svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) +svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) +svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) +svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) +svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) +svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) +svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) +svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) +svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) +svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) +svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) +svuint64_t 
svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) +svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) +svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) +svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) +svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) +svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) +svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) +svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) +svuint64_t svld1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) +svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) +svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) +svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) +svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) +svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) +svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) +svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) +svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) +svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) +svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) +svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) +svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) +svuint32_t 
svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) +svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) +svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) +svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) +svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) +svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) +svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) +svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) +svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) +svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) +svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) +svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) +svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) +svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) +svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) +svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) +svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) +svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) +svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) +svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) +svuint8_t svldff1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) +svuint32_t 
svldff1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) +svuint64_t svldff1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) +svuint16_t svldff1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) +svint8_t svldff1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) +svfloat64_t svldff1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) +svfloat32_t svldff1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) +svfloat16_t svldff1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) +svint32_t svldff1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) +svint64_t svldff1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) +svint16_t svldff1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) +svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) +svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) +svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) +svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) +svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) +svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) +svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) +svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) +svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) +svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) +svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) +svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) +svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) +svuint64_t 
svldff1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) +svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) +svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) +svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) +svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) +svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) +svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) +svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) +svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) +svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) +svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) +svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) +svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) +svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) +svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) +svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) +svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) +svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) +svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) +svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) +svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) +svfloat32_t 
svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) +svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) +svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) +svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) +svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) +svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) +svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) +svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) +svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) +svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) +svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) +svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) +svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) +svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) +svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) +svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) +svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) +svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) +svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) +svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) +svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) +svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) 
+svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) +svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) +svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) +svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) +svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) +svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) +svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) +svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) +svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) +svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) +svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) +svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) +svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32))) +svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64))) +svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16))) +svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32))) +svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64))) +svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16))) +svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32))) +svuint32_t svldff1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64))) +svuint64_t svldff1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16))) +svuint16_t svldff1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32))) +svint32_t svldff1sb_s32(svbool_t, int8_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64))) +svint64_t svldff1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16))) +svint16_t svldff1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) +svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) +svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) +svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) +svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) +svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) +svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) +svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) +svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) +svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) +svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) +svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) +svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) +svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) +svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) +svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) +svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) +svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) +svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) +svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) +svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) +svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) +svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) +svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) +svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) +svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) +svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) +svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) +svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32))) +svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64))) +svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32))) +svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64))) +svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32))) +svuint32_t svldff1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64))) +svuint64_t svldff1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32))) +svint32_t svldff1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64))) +svint64_t svldff1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) +svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) +svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) +svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) +svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) +svuint64_t 
svldff1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) +svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) +svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) +svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) +svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) +svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) +svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) +svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) +svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) +svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64))) +svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64))) +svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64))) +svuint64_t svldff1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64))) +svint64_t svldff1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) +svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) +svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) +svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) +svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) +svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) +svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) +svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) +svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) +svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) +svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) +svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) +svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) +svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) +svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) +svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) +svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32))) +svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64))) +svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16))) +svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32))) +svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64))) +svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16))) +svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32))) +svuint32_t svldff1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64))) +svuint64_t svldff1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16))) +svuint16_t svldff1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32))) +svint32_t svldff1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64))) +svint64_t svldff1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16))) +svint16_t svldff1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) +svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) +svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) +svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) +svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) +svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) +svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) +svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) +svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) +svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) +svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) +svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) +svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) +svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) +svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) +svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) +svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) +svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) +svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) +svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) +svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) +svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) +svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) 
+svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) +svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) +svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) +svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) +svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) +svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32))) +svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64))) +svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32))) +svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64))) +svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32))) +svuint32_t svldff1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64))) +svuint64_t svldff1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32))) +svint32_t svldff1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64))) +svint64_t svldff1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) +svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) +svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) +svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) +svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) +svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) +svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) +svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) +svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) +svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) +svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) +svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) +svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) +svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) +svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64))) +svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64))) +svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64))) +svuint64_t svldff1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64))) +svint64_t svldff1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) +svuint8_t svldnf1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) +svuint32_t svldnf1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) +svuint64_t svldnf1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) +svuint16_t svldnf1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) +svint8_t svldnf1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) +svfloat64_t svldnf1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) +svfloat32_t svldnf1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) +svfloat16_t svldnf1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) +svint32_t svldnf1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32))) +svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64))) +svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16))) +svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32))) +svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64))) +svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16))) +svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32))) +svuint32_t svldnf1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64))) +svuint64_t svldnf1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16))) +svuint16_t svldnf1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32))) +svint32_t svldnf1sb_s32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64))) +svint64_t svldnf1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16))) +svint16_t svldnf1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32))) +svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64))) +svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32))) +svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64))) +svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32))) +svuint32_t 
svldnf1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64))) +svuint64_t svldnf1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32))) +svint32_t svldnf1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64))) +svint64_t svldnf1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64))) +svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64))) +svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64))) +svuint64_t svldnf1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64))) +svint64_t svldnf1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32))) +svuint32_t svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64))) +svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16))) +svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32))) +svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64))) +svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16))) +svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32))) +svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64))) +svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16))) +svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32))) +svint32_t svldnf1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64))) +svint64_t svldnf1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16))) +svint16_t svldnf1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32))) +svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64))) +svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32))) +svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64))) +svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32))) +svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64))) +svuint64_t svldnf1uh_u64(svbool_t, uint16_t 
const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32))) +svint32_t svldnf1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64))) +svint64_t svldnf1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64))) +svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64))) +svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64))) +svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64))) +svint64_t svldnf1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr))) +svbool_t svrdffr(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z))) +svbool_t svrdffr_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr))) +void svsetffr(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void 
svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) +void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
+void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
+void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
+void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
+void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
+void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
+void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
+void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
+void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
+void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
+void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
+void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
+void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
+void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
+void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
+void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
+void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
+void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
+void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
+void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
+void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
+void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
+void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
+void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
+void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
+void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
+void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
+void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
+void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
+void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
+void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
+void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
+void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
+void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
+void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
+void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
+void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
+void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
+void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
+void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
+void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
+void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
+void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
+void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
+void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
+void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
+void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
+void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
+void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
+void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
+void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
+void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
+void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
+void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
+void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
+void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
+void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
+void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
+void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
+void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
+void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
+void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
+void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
+void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
+void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
+void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
+void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
+void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
+void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
+void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
+void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
+void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
+void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
+void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
+void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
+void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
+void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
+svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
+svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
+svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
+svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
+svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
+svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
+svfloat64_t svtssel_f64(svfloat64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
+svfloat32_t svtssel_f32(svfloat32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
+svfloat16_t svtssel_f16(svfloat16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr)))
+void svwrffr(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
+svuint32_t svadrb_offset(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
+svuint64_t svadrb_offset(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
+svuint32_t svadrb_offset(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
+svuint64_t svadrb_offset(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
+svuint32_t svadrd_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
+svuint64_t svadrd_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
+svuint32_t svadrd_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
+svuint64_t svadrd_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
+svuint32_t svadrh_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
+svuint64_t svadrh_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
+svuint32_t svadrh_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
+svuint64_t svadrh_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
+svuint32_t svadrw_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
+svuint64_t svadrw_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
+svuint32_t svadrw_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
+svuint64_t svadrw_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
+svuint32_t svcompact(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
+svuint64_t svcompact(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
+svfloat64_t svcompact(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
+svfloat32_t svcompact(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
+svint32_t svcompact(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
+svint64_t svcompact(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
+svfloat64_t svexpa(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
+svfloat32_t svexpa(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
+svfloat16_t svexpa(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
+svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
+svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
+svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
+svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
+svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
+svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
+svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
+svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
+svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
+svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
+svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
+svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
+svuint32_t svld1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
+svuint64_t svld1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
+svfloat64_t svld1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
+svfloat32_t svld1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
+svint32_t svld1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
+svint64_t svld1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
+svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
+svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
+svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
+svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
+svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
+svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
+svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
+svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
+svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
+svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
+svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
+svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
+svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
+svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
+svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
+svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
+svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
+svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))
+svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))
+svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))
+svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))
+svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))
+svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))
+svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))
+svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))
+svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))
+svint32_t svld1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))
+svint64_t svld1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
+svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
+svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
+svint32_t svld1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
+svint64_t svld1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))
+svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))
+svint64_t svld1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))
+svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))
+svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))
+svint32_t svld1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))
+svint64_t svld1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))
+svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))
+svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))
+svint32_t svld1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))
+svint64_t svld1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))
+svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))
+svint64_t svld1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))
+svuint8_t svldff1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))
+svuint32_t svldff1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))
+svuint64_t svldff1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))
+svuint16_t svldff1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))
+svint8_t svldff1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))
+svfloat64_t svldff1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))
+svfloat32_t svldff1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))
+svfloat16_t svldff1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))
+svint32_t svldff1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))
+svint64_t svldff1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))
+svint16_t svldff1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))
+svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))
+svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))
+svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))
+svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))
+svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))
+svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))
+svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))
+svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))
+svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))
+svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))
+svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))
+svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))
+svuint32_t svldff1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))
+svuint64_t svldff1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))
+svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))
+svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))
+svint32_t svldff1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))
+svint64_t svldff1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))
+svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))
+svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))
+svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))
+svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))
+svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))
+svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))
+svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))
+svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))
+svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))
+svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))
+svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))
+svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))
+svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))
+svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))
+svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))
+svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))
+svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))
+svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))
+svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))
+svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))
+svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))
+svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))
+svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))
+svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))
+svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))
+svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))
+svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))
+svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))
+svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))
+svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))
+svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))
+svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))
+svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))
+svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))
+svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))
+svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))
+svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))
+svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))
+svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))
+svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))
+svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))
+svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))
+svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))
+svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))
+svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
+svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
+svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
+svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
+svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
+svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
+svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
+svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
+svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
+svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
+svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
+svuint8_t svldnf1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
+svuint32_t svldnf1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
+svuint64_t svldnf1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
+svuint16_t svldnf1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
+svint8_t svldnf1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
+svfloat64_t svldnf1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
+svfloat32_t svldnf1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
+svfloat16_t svldnf1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
+svint32_t svldnf1(svbool_t, int32_t const *);
+__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_index(svbool_t, svuint32_t, int64_t, 
enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void svprfw_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, 
svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter(svbool_t, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) +void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, 
svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) +void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) +void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) +void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) +void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) +void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) +void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) +void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) +void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) +void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) +void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) +void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) +void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) +void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) +void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) +void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) +void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) +void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) +void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) +void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) +void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) +void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) +void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) +void svst1b_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) +void svst1b_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) +void svst1b_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) +void svst1b_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) +void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) +void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) +void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) +void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) +void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) +void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) +void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) +void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) +void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) +void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) +void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) +void 
svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) +void svst1h_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) +void svst1h_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) +void svst1h_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) +void svst1h_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) +void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) +void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) +void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) +void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) +void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) +void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) +void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) +void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) +void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) +void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) +void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) +void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) +void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) +void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) +void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) +void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) +void svst1w_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) +void svst1w_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) +void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) +void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) +void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) +void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) +void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) +void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) +void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) +void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) +svfloat64_t svtsmul(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) +svfloat32_t svtsmul(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) +svfloat16_t svtsmul(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) +svfloat64_t svtssel(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) +svfloat32_t svtssel(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) +svfloat16_t svtssel(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) 
+svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_n_bf16(bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16))) +svbfloat16x2_t svundef2_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16))) +svbfloat16x3_t svundef3_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16))) +svbfloat16x4_t svundef4_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16))) +svbfloat16_t svundef_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_bf16(bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane(svbfloat16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2(svbfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3(svbfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4(svbfloat16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr(svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2(svbool_t, 
bfloat16_t *, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
+void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
+void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
+void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
+void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
+void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
+void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
+void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
+svbfloat16_t svtbl(svbfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
+svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
+svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
+svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
+svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
+svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
+svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
+svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
+svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
+svuint8_t svld1ro_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
+svuint32_t svld1ro_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
+svuint64_t svld1ro_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
+svuint16_t svld1ro_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
+svint8_t svld1ro_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
+svfloat64_t svld1ro_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
+svfloat32_t svld1ro_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
+svfloat16_t svld1ro_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
+svint32_t svld1ro_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
+svint64_t svld1ro_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
+svint16_t svld1ro_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
+svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
+svuint8_t svtrn1q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
+svuint32_t svtrn1q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
+svuint64_t svtrn1q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
+svuint16_t svtrn1q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
+svint8_t svtrn1q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
+svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
+svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
+svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
+svint32_t svtrn1q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
+svint64_t svtrn1q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
+svint16_t svtrn1q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
+svuint8_t svtrn2q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
+svuint32_t svtrn2q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
+svuint64_t svtrn2q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
+svuint16_t svtrn2q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
+svint8_t svtrn2q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
+svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
+svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
+svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
+svint32_t svtrn2q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
+svint64_t svtrn2q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
+svint16_t svtrn2q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
+svuint8_t svuzp1q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
+svuint32_t svuzp1q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
+svuint64_t svuzp1q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
+svuint16_t svuzp1q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
+svint8_t svuzp1q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
+svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
+svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
+svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
+svint32_t svuzp1q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
+svint64_t svuzp1q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
+svint16_t svuzp1q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
+svuint8_t svuzp2q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
+svuint32_t svuzp2q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
+svuint64_t svuzp2q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
+svuint16_t svuzp2q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
+svint8_t svuzp2q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
+svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
+svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
+svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
+svint32_t svuzp2q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
+svint64_t svuzp2q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
+svint16_t svuzp2q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
+svuint8_t svzip1q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
+svuint32_t svzip1q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
+svuint64_t svzip1q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
+svuint16_t svzip1q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
+svint8_t svzip1q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
+svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
+svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
+svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
+svint32_t svzip1q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
+svint64_t svzip1q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
+svint16_t svzip1q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
+svuint8_t svzip2q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
+svuint32_t svzip2q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
+svuint64_t svzip2q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
+svuint16_t svzip2q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
+svint8_t svzip2q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
+svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
+svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
+svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
+svint32_t svzip2q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
+svint64_t svzip2q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
+svint16_t svzip2q_s16(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
+svuint8_t svld1ro(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
+svuint32_t svld1ro(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
+svuint64_t svld1ro(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
+svuint16_t svld1ro(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
+svint8_t svld1ro(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
+svfloat64_t svld1ro(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
+svfloat32_t svld1ro(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
+svfloat16_t svld1ro(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
+svint32_t svld1ro(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
+svint64_t svld1ro(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
+svint16_t svld1ro(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
+svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
+svuint8_t svtrn1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
+svuint32_t svtrn1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
+svuint64_t svtrn1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
+svuint16_t svtrn1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
+svint8_t svtrn1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
+svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
+svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
+svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
+svint32_t svtrn1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
+svint64_t svtrn1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
+svint16_t svtrn1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
+svuint8_t svtrn2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
+svuint32_t svtrn2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
+svuint64_t svtrn2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
+svuint16_t svtrn2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
+svint8_t svtrn2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
+svfloat64_t svtrn2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
+svfloat32_t svtrn2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
+svfloat16_t svtrn2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
+svint32_t svtrn2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
+svint64_t svtrn2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
+svint16_t svtrn2q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
+svuint8_t svuzp1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
+svuint32_t svuzp1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
+svuint64_t svuzp1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
+svuint16_t svuzp1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
+svint8_t svuzp1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
+svfloat64_t svuzp1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
+svfloat32_t svuzp1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
+svfloat16_t svuzp1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
+svint32_t svuzp1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
+svint64_t svuzp1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
+svint16_t svuzp1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
+svuint8_t svuzp2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
+svuint32_t svuzp2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
+svuint64_t svuzp2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
+svuint16_t svuzp2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
+svint8_t svuzp2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
+svfloat64_t svuzp2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
+svfloat32_t svuzp2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
+svfloat16_t svuzp2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
+svint32_t svuzp2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
+svint64_t svuzp2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
+svint16_t svuzp2q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
+svuint8_t svzip1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
+svuint32_t svzip1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
+svuint64_t svzip1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
+svuint16_t svzip1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
+svint8_t svzip1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
+svfloat64_t svzip1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
+svfloat32_t svzip1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
+svfloat16_t svzip1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
+svint32_t svzip1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
+svint64_t svzip1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
+svint16_t svzip1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
+svuint8_t svzip2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
+svuint32_t svzip2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
+svuint64_t svzip2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
+svuint16_t svzip2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
+svint8_t svzip2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
+svfloat64_t svzip2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
+svfloat32_t svzip2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
+svfloat16_t svzip2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
+svint32_t svzip2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
+svint64_t svzip2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
+svint16_t svzip2q(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
+svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
+svbfloat16_t svld1ro(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
+svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
+svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
+svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
+svint32_t svmmla(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
+svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
+svint32_t svusmmla(svint32_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
+svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
+svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
+svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
+svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
+svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
+svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
+svint32_t svsudot(svint32_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
+svint32_t svsudot(svint32_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
+svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
+svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
+svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
+svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
+svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
+svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
+svuint8_t svhistseg_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
+svuint8_t svhistseg_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
+svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
+svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
+svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
+svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
+svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
+svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
+svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
+svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
+svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
+svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
+svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
+svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
+svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
+svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
+svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
+svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
+svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
+svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
+svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
+svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
+svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
+svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
+svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
+svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
+svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
+svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
+svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
+svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
+svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
+svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
+svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
+svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
+svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
+svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
+svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
+svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
+svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
+svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
+svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
+svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
+svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
+svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
+svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
+svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
+svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
+svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
+svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
+svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
+svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
+svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
+svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
+svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
+svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
+svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
+svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
+svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
+svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
+svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
+svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
+svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
+svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
+svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
+svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
+svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
+svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
+svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
+svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
+svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
+svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
+svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
+svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
+svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
+svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
+svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
+svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
+svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
+svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
+svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
+svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
+svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
+svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
+svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
+svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
+svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
+svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
+svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
+svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
+svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
+svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
+svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
+svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
+svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
+svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
+svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
+svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
+svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
+svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
+svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
+svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
+svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
+svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
+svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
+svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
+svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
+svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
+svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
+svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
+svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
+svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
+svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
+svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
+svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
+svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
+svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
+svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
+svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
+svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
+svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
+svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
+svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
+svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
+svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
+svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
+svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
+svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
+svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
+svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
+svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
+svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
+svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
+svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
+svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
+svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
+svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
+svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
+svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
+svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
+svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
+svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
+svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
+svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
+void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
+void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
+void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
+void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
+void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
+void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
+void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
+void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
+void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
+void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
+void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
+void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
+void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
+void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
+void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
+void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
+void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
+void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
+void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
+void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
+void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
+void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
+void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
+void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
+void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
+void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
+void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
+void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
+void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
+void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
+void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
+void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
+void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
+void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
+void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
+void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
+void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
+void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
+void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
+void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
+void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
+void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
+void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
+void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
+void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
+void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
+void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
+void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
+void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
+void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
+void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
+void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
+void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
+void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
+void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
+void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
+void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
+void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
+void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
+void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
+void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
+void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
+void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
+void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
+void svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
+void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
+void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
+void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
+void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
+void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
+void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
+void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
+void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
+void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
+void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
+void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
+void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
+void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
+void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
+void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
+void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
+void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
+void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
+svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
+svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
+svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
+svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
+svuint8_t svhistseg(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
+svuint8_t svhistseg(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
+svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
+svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
+svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
+svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
+svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
+svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
+svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
+svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
+svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
+svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
+svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
+svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
+svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
+svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
+svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
+svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
+svint32_t svldnt1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
+svint64_t svldnt1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
+svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
+svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
+svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
+svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
+svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
+svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
+svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
+svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
+svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
+svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
+svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
+svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
+svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
+svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
+svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
+svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
+svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
+svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
+svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
+svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
+svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
+svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
+svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
+svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
+svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
+svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
+svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
+svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
+svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
+svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
+svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) +svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) +svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) +svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) +svbool_t svnmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) +svbool_t svnmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) +void svstnt1_scatter_index(svbool_t, 
svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) +void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) +void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) +void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) +void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) +void svstnt1_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) +void svstnt1_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) +void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) +void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) +void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) +void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) +void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) +void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) +void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) +void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) +void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) +void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) +void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) +void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) +void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) +void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) +void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) +void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) +void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) +void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) +void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) +void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) +void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) +void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) +void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) +void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) +void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) +void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) +void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) +void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) +void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) +void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) +void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) +void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) +void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) +void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) +void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) +void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) +void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) +void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) +void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) +void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) +void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) +void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) +void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) +void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) +void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) +void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) +void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) +void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) +void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) +void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) +void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) +void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) +void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) +svbfloat16_t svadd_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) +svbfloat16_t svadd_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) +svbfloat16_t svadd_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) +svbfloat16_t svadd_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) +svbfloat16_t 
svadd_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) +svbfloat16_t svadd_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) +svbfloat16_t svclamp_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) +svbfloat16_t svmax_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) +svbfloat16_t svmax_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) +svbfloat16_t svmax_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) +svbfloat16_t svmax_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) +svbfloat16_t svmax_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) +svbfloat16_t svmax_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) +svbfloat16_t svmaxnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) +svbfloat16_t svmaxnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) +svbfloat16_t svmaxnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) +svbfloat16_t svmaxnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) +svbfloat16_t svmaxnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) +svbfloat16_t svmaxnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) +svbfloat16_t svmin_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) +svbfloat16_t svmin_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) +svbfloat16_t svmin_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) +svbfloat16_t svmin_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) +svbfloat16_t svmin_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) +svbfloat16_t svmin_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) +svbfloat16_t svminnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) +svbfloat16_t svminnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) +svbfloat16_t svminnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) +svbfloat16_t svminnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) +svbfloat16_t svminnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) +svbfloat16_t svminnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) +svbfloat16_t svmla_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) +svbfloat16_t svmla_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) +svbfloat16_t svmla_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) +svbfloat16_t svmla_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) +svbfloat16_t svmla_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) +svbfloat16_t svmla_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) +svbfloat16_t svmla_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) +svbfloat16_t svmls_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) +svbfloat16_t svmls_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) +svbfloat16_t svmls_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) +svbfloat16_t svmls_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) +svbfloat16_t svmls_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) +svbfloat16_t svmls_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) +svbfloat16_t svmls_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) +svbfloat16_t svmul_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) +svbfloat16_t svmul_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) +svbfloat16_t svmul_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) +svbfloat16_t svmul_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) +svbfloat16_t svmul_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) +svbfloat16_t svmul_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) +svbfloat16_t svmul_lane_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) +svbfloat16_t svsub_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) +svbfloat16_t svsub_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) +svbfloat16_t svsub_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) +svbfloat16_t svsub_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) +svbfloat16_t svsub_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) +svbfloat16_t svsub_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) +svbfloat16_t svadd_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) +svbfloat16_t svadd_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) +svbfloat16_t svadd_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) +svbfloat16_t svadd_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) +svbfloat16_t svadd_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) +svbfloat16_t svadd_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) +svbfloat16_t svclamp(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) +svbfloat16_t svmax_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) +svbfloat16_t svmax_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) +svbfloat16_t svmax_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) +svbfloat16_t svmax_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) +svbfloat16_t svmax_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) +svbfloat16_t svmax_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) +svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) +svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) +svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) +svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) +svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) +svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) +svbfloat16_t svmin_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) +svbfloat16_t svmin_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) +svbfloat16_t svmin_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) +svbfloat16_t svmin_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) +svbfloat16_t svmin_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) +svbfloat16_t svmin_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) +svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) +svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) +svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) +svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) +svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) +svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) +svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) +svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) +svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) +svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) +svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) +svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) +svbfloat16_t svmla_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) +svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) +svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) +svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) +svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) +svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) +svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) +svbfloat16_t svmls_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) +svbfloat16_t svmul_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) +svbfloat16_t svmul_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) +svbfloat16_t svmul_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) +svbfloat16_t svmul_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) +svbfloat16_t svmul_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) +svbfloat16_t svmul_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) +svbfloat16_t svmul_lane(svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) +svbfloat16_t svsub_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) +svbfloat16_t svsub_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) +svbfloat16_t svsub_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) +svbfloat16_t svsub_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) +svbfloat16_t svsub_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) +svbfloat16_t svsub_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) +svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) +svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) +svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) +svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) +svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) +svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) +svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) +svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8))) +svuint8_t svaesd_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8))) +svuint8_t svaese_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8))) +svuint8_t 
svaesimc_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8))) +svuint8_t svaesmc_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64))) +svuint64_t svpmullb_pair_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64))) +svuint64_t svpmullb_pair_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64))) +svuint64_t svpmullt_pair_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64))) +svuint64_t svpmullt_pair_u64(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8))) +svuint8_t svaesd(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8))) +svuint8_t svaese(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8))) +svuint8_t svaesimc(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8))) +svuint8_t svaesmc(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64))) +svuint64_t svpmullb_pair(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64))) +svuint64_t svpmullb_pair(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64))) +svuint64_t svpmullt_pair(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64))) +svuint64_t svpmullt_pair(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) +svuint8_t svbdep_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) +svuint32_t svbdep_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) +svuint64_t svbdep_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) +svuint16_t svbdep_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) +svuint8_t svbdep_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) +svuint32_t svbdep_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) +svuint64_t svbdep_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) +svuint16_t svbdep_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) +svuint8_t svbext_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) +svuint32_t svbext_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) +svuint64_t svbext_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) +svuint16_t svbext_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) +svuint8_t svbext_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) +svuint32_t svbext_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) +svuint64_t svbext_u64(svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) +svuint16_t svbext_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) +svuint8_t svbgrp_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) +svuint32_t svbgrp_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) +svuint64_t svbgrp_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) +svuint16_t svbgrp_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) +svuint8_t svbgrp_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) +svuint32_t svbgrp_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) +svuint64_t svbgrp_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) +svuint16_t svbgrp_u16(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) +svuint8_t svbdep(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) +svuint32_t svbdep(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) +svuint64_t svbdep(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) +svuint16_t svbdep(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) +svuint8_t svbdep(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) +svuint32_t svbdep(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) +svuint64_t svbdep(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) +svuint16_t svbdep(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) +svuint8_t svbext(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) +svuint32_t svbext(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) +svuint64_t svbext(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) +svuint16_t svbext(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) +svuint8_t svbext(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) +svuint32_t svbext(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) +svuint64_t svbext(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) +svuint16_t svbext(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) +svuint8_t svbgrp(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) +svuint32_t svbgrp(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) +svuint64_t svbgrp(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) +svuint16_t svbgrp(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) 
+svuint8_t svbgrp(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) +svuint32_t svbgrp(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) +svuint64_t svbgrp(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) +svuint16_t svbgrp(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64))) +svuint64_t svrax1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64))) +svint64_t svrax1_s64(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64))) +svuint64_t svrax1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64))) +svint64_t svrax1(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32))) +svuint32_t svsm4e_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32))) +svuint32_t svsm4ekey_u32(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32))) +svuint32_t svsm4e(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32))) +svuint32_t svsm4ekey(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u8))) +uint8x16_t svaddqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u32))) +uint32x4_t svaddqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u64))) +uint64x2_t svaddqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) +uint16x8_t svaddqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) +int8x16_t svaddqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) +int32x4_t svaddqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) +int64x2_t svaddqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) +int16x8_t svaddqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) +float64x2_t svaddqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) +float32x4_t svaddqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) +float16x8_t svaddqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) +uint8x16_t svandqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) +uint32x4_t svandqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u64))) +uint64x2_t svandqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u16))) +uint16x8_t svandqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s8))) +int8x16_t svandqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s32))) +int32x4_t svandqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s64))) +int64x2_t 
svandqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s16))) +int16x8_t svandqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u8))) +uint8x16_t sveorqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u32))) +uint32x4_t sveorqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u64))) +uint64x2_t sveorqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u16))) +uint16x8_t sveorqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s8))) +int8x16_t sveorqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s32))) +int32x4_t sveorqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s64))) +int64x2_t sveorqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s16))) +int16x8_t sveorqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u8))) +svuint8_t svextq_u8(svuint8_t, svuint8_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u32))) +svuint32_t svextq_u32(svuint32_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u64))) +svuint64_t svextq_u64(svuint64_t, svuint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u16))) +svuint16_t svextq_u16(svuint16_t, svuint16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_bf16))) +svbfloat16_t svextq_bf16(svbfloat16_t, svbfloat16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s8))) +svint8_t svextq_s8(svint8_t, svint8_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f64))) +svfloat64_t svextq_f64(svfloat64_t, svfloat64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f32))) +svfloat32_t svextq_f32(svfloat32_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f16))) +svfloat16_t svextq_f16(svfloat16_t, svfloat16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s32))) +svint32_t svextq_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s64))) +svint64_t svextq_s64(svint64_t, svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s16))) +svint16_t svextq_s16(svint16_t, svint16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u32))) +svuint32_t svld1q_gather_u64base_index_u32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u64))) +svuint64_t svld1q_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u16))) +svuint16_t svld1q_gather_u64base_index_u16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_bf16))) +svbfloat16_t svld1q_gather_u64base_index_bf16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f64))) +svfloat64_t svld1q_gather_u64base_index_f64(svbool_t, 
svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f32))) +svfloat32_t svld1q_gather_u64base_index_f32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f16))) +svfloat16_t svld1q_gather_u64base_index_f16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s32))) +svint32_t svld1q_gather_u64base_index_s32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s64))) +svint64_t svld1q_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s16))) +svint16_t svld1q_gather_u64base_index_s16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u8))) +svuint8_t svld1q_gather_u64base_offset_u8(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u32))) +svuint32_t svld1q_gather_u64base_offset_u32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u64))) +svuint64_t svld1q_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u16))) +svuint16_t svld1q_gather_u64base_offset_u16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_bf16))) +svbfloat16_t svld1q_gather_u64base_offset_bf16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s8))) +svint8_t svld1q_gather_u64base_offset_s8(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f64))) +svfloat64_t svld1q_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f32))) +svfloat32_t svld1q_gather_u64base_offset_f32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f16))) +svfloat16_t svld1q_gather_u64base_offset_f16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s32))) +svint32_t svld1q_gather_u64base_offset_s32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s64))) +svint64_t svld1q_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s16))) +svint16_t svld1q_gather_u64base_offset_s16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u8))) +svuint8_t svld1q_gather_u64base_u8(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u32))) +svuint32_t svld1q_gather_u64base_u32(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u64))) +svuint64_t svld1q_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u16))) +svuint16_t svld1q_gather_u64base_u16(svbool_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_bf16))) +svbfloat16_t svld1q_gather_u64base_bf16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s8))) +svint8_t svld1q_gather_u64base_s8(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f64))) +svfloat64_t svld1q_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f32))) +svfloat32_t svld1q_gather_u64base_f32(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f16))) +svfloat16_t svld1q_gather_u64base_f16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s32))) +svint32_t svld1q_gather_u64base_s32(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s64))) +svint64_t svld1q_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s16))) +svint16_t svld1q_gather_u64base_s16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u32))) +svuint32_t svld1q_gather_u64index_u32(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u64))) +svuint64_t svld1q_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u16))) +svuint16_t svld1q_gather_u64index_u16(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_bf16))) +svbfloat16_t svld1q_gather_u64index_bf16(svbool_t, bfloat16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f64))) +svfloat64_t svld1q_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f32))) +svfloat32_t svld1q_gather_u64index_f32(svbool_t, float32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f16))) +svfloat16_t svld1q_gather_u64index_f16(svbool_t, float16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s32))) +svint32_t svld1q_gather_u64index_s32(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s64))) +svint64_t svld1q_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s16))) +svint16_t svld1q_gather_u64index_s16(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u8))) +svuint8_t svld1q_gather_u64offset_u8(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u32))) +svuint32_t svld1q_gather_u64offset_u32(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u64))) +svuint64_t svld1q_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u16))) 
+svuint16_t svld1q_gather_u64offset_u16(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_bf16))) +svbfloat16_t svld1q_gather_u64offset_bf16(svbool_t, bfloat16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s8))) +svint8_t svld1q_gather_u64offset_s8(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f64))) +svfloat64_t svld1q_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f32))) +svfloat32_t svld1q_gather_u64offset_f32(svbool_t, float32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f16))) +svfloat16_t svld1q_gather_u64offset_f16(svbool_t, float16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s32))) +svint32_t svld1q_gather_u64offset_s32(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s64))) +svint64_t svld1q_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s16))) +svint16_t svld1q_gather_u64offset_s16(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_u64))) +svuint64_t svld1udq_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_f64))) +svfloat64_t svld1udq_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_s64))) +svint64_t svld1udq_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_u64))) +svuint64_t svld1udq_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_f64))) +svfloat64_t svld1udq_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_s64))) +svint64_t svld1udq_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_u32))) +svuint32_t svld1uwq_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_f32))) +svfloat32_t svld1uwq_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_s32))) +svint32_t svld1uwq_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_u32))) +svuint32_t svld1uwq_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_f32))) +svfloat32_t svld1uwq_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_s32))) +svint32_t svld1uwq_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u8))) +svuint8x2_t svld2q_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u32))) +svuint32x2_t svld2q_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u64))) +svuint64x2_t svld2q_u64(svbool_t, uint64_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u16))) +svuint16x2_t svld2q_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s8))) +svint8x2_t svld2q_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f64))) +svfloat64x2_t svld2q_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f32))) +svfloat32x2_t svld2q_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f16))) +svfloat16x2_t svld2q_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s32))) +svint32x2_t svld2q_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s64))) +svint64x2_t svld2q_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s16))) +svint16x2_t svld2q_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_bf16))) +svbfloat16x2_t svld2q_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u8))) +svuint8x2_t svld2q_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u32))) +svuint32x2_t svld2q_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u64))) +svuint64x2_t svld2q_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u16))) +svuint16x2_t svld2q_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s8))) +svint8x2_t svld2q_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f64))) +svfloat64x2_t svld2q_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f32))) +svfloat32x2_t svld2q_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f16))) +svfloat16x2_t svld2q_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s32))) +svint32x2_t svld2q_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s64))) +svint64x2_t svld2q_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s16))) +svint16x2_t svld2q_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_bf16))) +svbfloat16x2_t svld2q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u8))) +svuint8x3_t svld3q_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u32))) +svuint32x3_t svld3q_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u64))) +svuint64x3_t svld3q_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u16))) +svuint16x3_t svld3q_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s8))) +svint8x3_t svld3q_s8(svbool_t, int8_t 
const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f64))) +svfloat64x3_t svld3q_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f32))) +svfloat32x3_t svld3q_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f16))) +svfloat16x3_t svld3q_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s32))) +svint32x3_t svld3q_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s64))) +svint64x3_t svld3q_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s16))) +svint16x3_t svld3q_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_bf16))) +svbfloat16x3_t svld3q_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u8))) +svuint8x3_t svld3q_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u32))) +svuint32x3_t svld3q_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u64))) +svuint64x3_t svld3q_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u16))) +svuint16x3_t svld3q_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s8))) +svint8x3_t svld3q_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f64))) +svfloat64x3_t svld3q_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f32))) +svfloat32x3_t svld3q_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f16))) +svfloat16x3_t svld3q_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s32))) +svint32x3_t svld3q_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s64))) +svint64x3_t svld3q_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s16))) +svint16x3_t svld3q_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_bf16))) +svbfloat16x3_t svld3q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u8))) +svuint8x4_t svld4q_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u32))) +svuint32x4_t svld4q_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u64))) +svuint64x4_t svld4q_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u16))) +svuint16x4_t svld4q_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s8))) +svint8x4_t svld4q_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f64))) +svfloat64x4_t svld4q_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f32))) +svfloat32x4_t 
svld4q_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f16))) +svfloat16x4_t svld4q_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s32))) +svint32x4_t svld4q_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s64))) +svint64x4_t svld4q_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s16))) +svint16x4_t svld4q_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_bf16))) +svbfloat16x4_t svld4q_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u8))) +svuint8x4_t svld4q_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u32))) +svuint32x4_t svld4q_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u64))) +svuint64x4_t svld4q_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u16))) +svuint16x4_t svld4q_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s8))) +svint8x4_t svld4q_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f64))) +svfloat64x4_t svld4q_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f32))) +svfloat32x4_t svld4q_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f16))) +svfloat16x4_t svld4q_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s32))) +svint32x4_t svld4q_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s64))) +svint64x4_t svld4q_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s16))) +svint16x4_t svld4q_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_bf16))) +svbfloat16x4_t svld4q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f64))) +float64x2_t svmaxnmqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f32))) +float32x4_t svmaxnmqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f16))) +float16x8_t svmaxnmqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f64))) +float64x2_t svmaxqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f32))) +float32x4_t svmaxqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f16))) +float16x8_t svmaxqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s8))) +int8x16_t svmaxqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s32))) +int32x4_t svmaxqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s64))) +int64x2_t 
svmaxqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s16))) +int16x8_t svmaxqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u8))) +uint8x16_t svmaxqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u32))) +uint32x4_t svmaxqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u64))) +uint64x2_t svmaxqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u16))) +uint16x8_t svmaxqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f64))) +float64x2_t svminnmqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f32))) +float32x4_t svminnmqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f16))) +float16x8_t svminnmqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f64))) +float64x2_t svminqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f32))) +float32x4_t svminqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f16))) +float16x8_t svminqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s8))) +int8x16_t svminqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s32))) +int32x4_t svminqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s64))) +int64x2_t svminqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s16))) +int16x8_t svminqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u8))) +uint8x16_t svminqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u32))) +uint32x4_t svminqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u64))) +uint64x2_t svminqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u16))) +uint16x8_t svminqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u8))) +uint8x16_t svorqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u32))) +uint32x4_t svorqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u64))) +uint64x2_t svorqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u16))) +uint16x8_t svorqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s8))) +int8x16_t svorqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s32))) +int32x4_t svorqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s64))) +int64x2_t svorqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s16))) +int16x8_t svorqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8))) +svbool_t svpmov_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8))) +svbool_t svpmov_s8(svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64))) +svbool_t svpmov_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64))) +svbool_t svpmov_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16))) +svbool_t svpmov_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16))) +svbool_t svpmov_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32))) +svbool_t svpmov_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32))) +svbool_t svpmov_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u8))) +svbool_t svpmov_lane_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s8))) +svbool_t svpmov_lane_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64))) +svbool_t svpmov_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64))) +svbool_t svpmov_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16))) +svbool_t svpmov_lane_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16))) +svbool_t svpmov_lane_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32))) +svbool_t svpmov_lane_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32))) +svbool_t svpmov_lane_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64_m))) +svuint64_t svpmov_lane_u64_m(svuint64_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64_m))) +svint64_t svpmov_lane_s64_m(svint64_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16_m))) +svuint16_t svpmov_lane_u16_m(svuint16_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16_m))) +svint16_t svpmov_lane_s16_m(svint16_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32_m))) +svuint32_t svpmov_lane_u32_m(svuint32_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32_m))) +svint32_t svpmov_lane_s32_m(svint32_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8_z))) +svuint8_t svpmov_u8_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8_z))) +svint8_t svpmov_s8_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64_z))) +svuint64_t svpmov_u64_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64_z))) +svint64_t svpmov_s64_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16_z))) +svuint16_t svpmov_u16_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16_z))) +svint16_t svpmov_s16_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32_z))) +svuint32_t svpmov_u32_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32_z))) +svint32_t svpmov_s32_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_u64))) +void svst1dq_u64(svbool_t, uint64_t const *, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_f64))) +void svst1dq_f64(svbool_t, float64_t const *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_s64))) +void svst1dq_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_u64))) +void svst1dq_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_f64))) +void svst1dq_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_s64))) +void svst1dq_vnum_s64(svbool_t, int64_t const *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u8))) +void svst1q_scatter_u64base_u8(svbool_t, svuint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u32))) +void svst1q_scatter_u64base_u32(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u64))) +void svst1q_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u16))) +void svst1q_scatter_u64base_u16(svbool_t, svuint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_bf16))) +void svst1q_scatter_u64base_bf16(svbool_t, svuint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s8))) +void svst1q_scatter_u64base_s8(svbool_t, svuint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f64))) +void svst1q_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f32))) +void svst1q_scatter_u64base_f32(svbool_t, svuint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f16))) +void svst1q_scatter_u64base_f16(svbool_t, svuint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s32))) +void svst1q_scatter_u64base_s32(svbool_t, svuint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s64))) +void svst1q_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s16))) +void svst1q_scatter_u64base_s16(svbool_t, svuint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u32))) +void svst1q_scatter_u64base_index_u32(svbool_t, svuint64_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u64))) +void svst1q_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u16))) +void svst1q_scatter_u64base_index_u16(svbool_t, svuint64_t, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_bf16))) +void svst1q_scatter_u64base_index_bf16(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f64))) +void svst1q_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f32))) +void svst1q_scatter_u64base_index_f32(svbool_t, svuint64_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f16))) +void svst1q_scatter_u64base_index_f16(svbool_t, svuint64_t, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s32))) +void svst1q_scatter_u64base_index_s32(svbool_t, svuint64_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s64))) +void svst1q_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s16))) +void svst1q_scatter_u64base_index_s16(svbool_t, svuint64_t, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u8))) +void svst1q_scatter_u64base_offset_u8(svbool_t, svuint64_t, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u32))) +void svst1q_scatter_u64base_offset_u32(svbool_t, svuint64_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u64))) +void svst1q_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u16))) +void svst1q_scatter_u64base_offset_u16(svbool_t, svuint64_t, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_bf16))) +void svst1q_scatter_u64base_offset_bf16(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s8))) +void svst1q_scatter_u64base_offset_s8(svbool_t, svuint64_t, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f64))) +void svst1q_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f32))) +void svst1q_scatter_u64base_offset_f32(svbool_t, svuint64_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f16))) +void svst1q_scatter_u64base_offset_f16(svbool_t, svuint64_t, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s32))) +void svst1q_scatter_u64base_offset_s32(svbool_t, svuint64_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s64))) +void svst1q_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s16))) +void svst1q_scatter_u64base_offset_s16(svbool_t, svuint64_t, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u32))) +void svst1q_scatter_u64index_u32(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u64))) +void svst1q_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u16))) +void 
svst1q_scatter_u64index_u16(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_bf16))) +void svst1q_scatter_u64index_bf16(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f64))) +void svst1q_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f32))) +void svst1q_scatter_u64index_f32(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f16))) +void svst1q_scatter_u64index_f16(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s32))) +void svst1q_scatter_u64index_s32(svbool_t, int32_t *, svuint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s64))) +void svst1q_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s16))) +void svst1q_scatter_u64index_s16(svbool_t, int16_t *, svuint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u8))) +void svst1q_scatter_u64offset_u8(svbool_t, uint8_t *, svuint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u32))) +void svst1q_scatter_u64offset_u32(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u64))) +void svst1q_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u16))) +void svst1q_scatter_u64offset_u16(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_bf16))) +void svst1q_scatter_u64offset_bf16(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s8))) +void svst1q_scatter_u64offset_s8(svbool_t, int8_t *, svuint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f64))) +void svst1q_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f32))) +void svst1q_scatter_u64offset_f32(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f16))) +void svst1q_scatter_u64offset_f16(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s32))) +void svst1q_scatter_u64offset_s32(svbool_t, int32_t *, svuint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s64))) +void svst1q_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s16))) +void svst1q_scatter_u64offset_s16(svbool_t, int16_t *, svuint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_u32))) +void svst1wq_u32(svbool_t, uint32_t const *, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_f32))) +void svst1wq_f32(svbool_t, float32_t const *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_s32))) +void svst1wq_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_u32))) +void svst1wq_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_f32))) +void svst1wq_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_s32))) +void svst1wq_vnum_s32(svbool_t, int32_t const *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u8))) +void svst2q_u8(svbool_t, uint8_t const *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u32))) +void svst2q_u32(svbool_t, uint32_t const *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u64))) +void svst2q_u64(svbool_t, uint64_t const *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u16))) +void svst2q_u16(svbool_t, uint16_t const *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s8))) +void svst2q_s8(svbool_t, int8_t const *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f64))) +void svst2q_f64(svbool_t, float64_t const *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f32))) +void svst2q_f32(svbool_t, float32_t const *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f16))) +void svst2q_f16(svbool_t, float16_t const *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s32))) +void svst2q_s32(svbool_t, int32_t const *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s64))) +void svst2q_s64(svbool_t, int64_t const *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s16))) +void svst2q_s16(svbool_t, int16_t const *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_bf16))) +void svst2q_bf16(svbool_t, bfloat16_t const *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u8))) +void svst2q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u32))) +void svst2q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u64))) +void svst2q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u16))) +void svst2q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s8))) +void svst2q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f64))) +void svst2q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f32))) +void svst2q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f16))) +void svst2q_vnum_f16(svbool_t, float16_t const *, int64_t, 
svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s32))) +void svst2q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s64))) +void svst2q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s16))) +void svst2q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_bf16))) +void svst2q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u8))) +void svst3q_u8(svbool_t, uint8_t const *, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u32))) +void svst3q_u32(svbool_t, uint32_t const *, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u64))) +void svst3q_u64(svbool_t, uint64_t const *, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u16))) +void svst3q_u16(svbool_t, uint16_t const *, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s8))) +void svst3q_s8(svbool_t, int8_t const *, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f64))) +void svst3q_f64(svbool_t, float64_t const *, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f32))) +void svst3q_f32(svbool_t, float32_t const *, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f16))) +void svst3q_f16(svbool_t, float16_t const *, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s32))) +void svst3q_s32(svbool_t, int32_t const *, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s64))) +void svst3q_s64(svbool_t, int64_t const *, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s16))) +void svst3q_s16(svbool_t, int16_t const *, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_bf16))) +void svst3q_bf16(svbool_t, bfloat16_t const *, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u8))) +void svst3q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u32))) +void svst3q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u64))) +void svst3q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u16))) +void svst3q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s8))) +void svst3q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f64))) +void svst3q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f32))) +void svst3q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f16))) +void svst3q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x3_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s32))) +void svst3q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s64))) +void svst3q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s16))) +void svst3q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_bf16))) +void svst3q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u8))) +void svst4q_u8(svbool_t, uint8_t const *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u32))) +void svst4q_u32(svbool_t, uint32_t const *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u64))) +void svst4q_u64(svbool_t, uint64_t const *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u16))) +void svst4q_u16(svbool_t, uint16_t const *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s8))) +void svst4q_s8(svbool_t, int8_t const *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f64))) +void svst4q_f64(svbool_t, float64_t const *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f32))) +void svst4q_f32(svbool_t, float32_t const *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f16))) +void svst4q_f16(svbool_t, float16_t const *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s32))) +void svst4q_s32(svbool_t, int32_t const *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s64))) +void svst4q_s64(svbool_t, int64_t const *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s16))) +void svst4q_s16(svbool_t, int16_t const *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_bf16))) +void svst4q_bf16(svbool_t, bfloat16_t const *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u8))) +void svst4q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u32))) +void svst4q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u64))) +void svst4q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u16))) +void svst4q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s8))) +void svst4q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f64))) +void svst4q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f32))) +void svst4q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f16))) +void svst4q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s32))) +void 
svst4q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s64))) +void svst4q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s16))) +void svst4q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_bf16))) +void svst4q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u8))) +svuint8_t svtblq_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u32))) +svuint32_t svtblq_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u64))) +svuint64_t svtblq_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u16))) +svuint16_t svtblq_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_bf16))) +svbfloat16_t svtblq_bf16(svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s8))) +svint8_t svtblq_s8(svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f64))) +svfloat64_t svtblq_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f32))) +svfloat32_t svtblq_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f16))) +svfloat16_t svtblq_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s32))) +svint32_t svtblq_s32(svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s64))) +svint64_t svtblq_s64(svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s16))) +svint16_t svtblq_s16(svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u8))) +svuint8_t svtbxq_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u32))) +svuint32_t svtbxq_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u64))) +svuint64_t svtbxq_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u16))) +svuint16_t svtbxq_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_bf16))) +svbfloat16_t svtbxq_bf16(svbfloat16_t, svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s8))) +svint8_t svtbxq_s8(svint8_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f64))) +svfloat64_t svtbxq_f64(svfloat64_t, svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f32))) +svfloat32_t svtbxq_f32(svfloat32_t, svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f16))) +svfloat16_t svtbxq_f16(svfloat16_t, svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s32))) +svint32_t svtbxq_s32(svint32_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s64))) +svint64_t svtbxq_s64(svint64_t, svint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s16))) +svint16_t svtbxq_s16(svint16_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u8))) +svuint8_t svuzpq1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u32))) +svuint32_t svuzpq1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u64))) +svuint64_t svuzpq1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u16))) +svuint16_t svuzpq1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_bf16))) +svbfloat16_t svuzpq1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s8))) +svint8_t svuzpq1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f64))) +svfloat64_t svuzpq1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f32))) +svfloat32_t svuzpq1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f16))) +svfloat16_t svuzpq1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s32))) +svint32_t svuzpq1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s64))) +svint64_t svuzpq1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s16))) +svint16_t svuzpq1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u8))) +svuint8_t svuzpq2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u32))) +svuint32_t svuzpq2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u64))) +svuint64_t svuzpq2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u16))) +svuint16_t svuzpq2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_bf16))) +svbfloat16_t svuzpq2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s8))) +svint8_t svuzpq2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f64))) +svfloat64_t svuzpq2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f32))) +svfloat32_t svuzpq2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f16))) +svfloat16_t svuzpq2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s32))) +svint32_t svuzpq2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s64))) +svint64_t svuzpq2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s16))) +svint16_t svuzpq2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u8))) +svuint8_t svzipq1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u32))) +svuint32_t svzipq1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u64))) +svuint64_t svzipq1_u64(svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u16))) +svuint16_t svzipq1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_bf16))) +svbfloat16_t svzipq1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s8))) +svint8_t svzipq1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f64))) +svfloat64_t svzipq1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f32))) +svfloat32_t svzipq1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f16))) +svfloat16_t svzipq1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s32))) +svint32_t svzipq1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s64))) +svint64_t svzipq1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s16))) +svint16_t svzipq1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u8))) +svuint8_t svzipq2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u32))) +svuint32_t svzipq2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u64))) +svuint64_t svzipq2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u16))) +svuint16_t svzipq2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_bf16))) +svbfloat16_t svzipq2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s8))) +svint8_t svzipq2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f64))) +svfloat64_t svzipq2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f32))) +svfloat32_t svzipq2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f16))) +svfloat16_t svzipq2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s32))) +svint32_t svzipq2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64))) +svint64_t svzipq2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16))) +svint16_t svzipq2_s16(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u8))) +uint8x16_t svaddqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u32))) +uint32x4_t svaddqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u64))) +uint64x2_t svaddqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) +uint16x8_t svaddqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) +int8x16_t svaddqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) +int32x4_t svaddqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) +int64x2_t svaddqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) +int16x8_t svaddqv(svbool_t, 
svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) +float64x2_t svaddqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) +float32x4_t svaddqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) +float16x8_t svaddqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) +uint8x16_t svandqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) +uint32x4_t svandqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u64))) +uint64x2_t svandqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u16))) +uint16x8_t svandqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s8))) +int8x16_t svandqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s32))) +int32x4_t svandqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s64))) +int64x2_t svandqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s16))) +int16x8_t svandqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u8))) +uint8x16_t sveorqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u32))) +uint32x4_t sveorqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u64))) +uint64x2_t sveorqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u16))) +uint16x8_t sveorqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s8))) +int8x16_t sveorqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s32))) +int32x4_t sveorqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s64))) +int64x2_t sveorqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s16))) +int16x8_t sveorqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u8))) +svuint8_t svextq(svuint8_t, svuint8_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u32))) +svuint32_t svextq(svuint32_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u64))) +svuint64_t svextq(svuint64_t, svuint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u16))) +svuint16_t svextq(svuint16_t, svuint16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_bf16))) +svbfloat16_t svextq(svbfloat16_t, svbfloat16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s8))) +svint8_t svextq(svint8_t, svint8_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f64))) +svfloat64_t svextq(svfloat64_t, svfloat64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f32))) +svfloat32_t svextq(svfloat32_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f16))) +svfloat16_t svextq(svfloat16_t, svfloat16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s32))) +svint32_t svextq(svint32_t, 
svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s64))) +svint64_t svextq(svint64_t, svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s16))) +svint16_t svextq(svint16_t, svint16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u32))) +svuint32_t svld1q_gather_index_u32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u64))) +svuint64_t svld1q_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u16))) +svuint16_t svld1q_gather_index_u16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_bf16))) +svbfloat16_t svld1q_gather_index_bf16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f64))) +svfloat64_t svld1q_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f32))) +svfloat32_t svld1q_gather_index_f32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f16))) +svfloat16_t svld1q_gather_index_f16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s32))) +svint32_t svld1q_gather_index_s32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s64))) +svint64_t svld1q_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s16))) +svint16_t svld1q_gather_index_s16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u8))) +svuint8_t svld1q_gather_offset_u8(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u32))) +svuint32_t svld1q_gather_offset_u32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u64))) +svuint64_t svld1q_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u16))) +svuint16_t svld1q_gather_offset_u16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_bf16))) +svbfloat16_t svld1q_gather_offset_bf16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s8))) +svint8_t svld1q_gather_offset_s8(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f64))) +svfloat64_t svld1q_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f32))) +svfloat32_t svld1q_gather_offset_f32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f16))) +svfloat16_t svld1q_gather_offset_f16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s32))) +svint32_t 
svld1q_gather_offset_s32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s64))) +svint64_t svld1q_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s16))) +svint16_t svld1q_gather_offset_s16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u8))) +svuint8_t svld1q_gather_u8(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u32))) +svuint32_t svld1q_gather_u32(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u64))) +svuint64_t svld1q_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u16))) +svuint16_t svld1q_gather_u16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_bf16))) +svbfloat16_t svld1q_gather_bf16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s8))) +svint8_t svld1q_gather_s8(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f64))) +svfloat64_t svld1q_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f32))) +svfloat32_t svld1q_gather_f32(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f16))) +svfloat16_t svld1q_gather_f16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s32))) +svint32_t svld1q_gather_s32(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s64))) +svint64_t svld1q_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s16))) +svint16_t svld1q_gather_s16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u32))) +svuint32_t svld1q_gather_index(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u64))) +svuint64_t svld1q_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u16))) +svuint16_t svld1q_gather_index(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_bf16))) +svbfloat16_t svld1q_gather_index(svbool_t, bfloat16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f64))) +svfloat64_t svld1q_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f32))) +svfloat32_t svld1q_gather_index(svbool_t, float32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f16))) +svfloat16_t svld1q_gather_index(svbool_t, float16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s32))) +svint32_t svld1q_gather_index(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s64))) +svint64_t 
svld1q_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s16))) +svint16_t svld1q_gather_index(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u8))) +svuint8_t svld1q_gather_offset(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u32))) +svuint32_t svld1q_gather_offset(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u64))) +svuint64_t svld1q_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u16))) +svuint16_t svld1q_gather_offset(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_bf16))) +svbfloat16_t svld1q_gather_offset(svbool_t, bfloat16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s8))) +svint8_t svld1q_gather_offset(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f64))) +svfloat64_t svld1q_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f32))) +svfloat32_t svld1q_gather_offset(svbool_t, float32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f16))) +svfloat16_t svld1q_gather_offset(svbool_t, float16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s32))) +svint32_t svld1q_gather_offset(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s64))) +svint64_t svld1q_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s16))) +svint16_t svld1q_gather_offset(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_u64))) +svuint64_t svld1udq(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_f64))) +svfloat64_t svld1udq(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_s64))) +svint64_t svld1udq(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_u64))) +svuint64_t svld1udq_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_f64))) +svfloat64_t svld1udq_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_s64))) +svint64_t svld1udq_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_u32))) +svuint32_t svld1uwq(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_f32))) +svfloat32_t svld1uwq(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_s32))) +svint32_t svld1uwq(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_u32))) +svuint32_t svld1uwq_vnum(svbool_t, 
uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_f32))) +svfloat32_t svld1uwq_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_s32))) +svint32_t svld1uwq_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u8))) +svuint8x2_t svld2q(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u32))) +svuint32x2_t svld2q(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u64))) +svuint64x2_t svld2q(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u16))) +svuint16x2_t svld2q(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s8))) +svint8x2_t svld2q(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f64))) +svfloat64x2_t svld2q(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f32))) +svfloat32x2_t svld2q(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f16))) +svfloat16x2_t svld2q(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s32))) +svint32x2_t svld2q(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s64))) +svint64x2_t svld2q(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s16))) +svint16x2_t svld2q(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_bf16))) +svbfloat16x2_t svld2q(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u8))) +svuint8x2_t svld2q_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u32))) +svuint32x2_t svld2q_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u64))) +svuint64x2_t svld2q_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u16))) +svuint16x2_t svld2q_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s8))) +svint8x2_t svld2q_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f64))) +svfloat64x2_t svld2q_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f32))) +svfloat32x2_t svld2q_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f16))) +svfloat16x2_t svld2q_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s32))) +svint32x2_t svld2q_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s64))) +svint64x2_t svld2q_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s16))) +svint16x2_t svld2q_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_bf16))) +svbfloat16x2_t svld2q_vnum(svbool_t, bfloat16_t const 
*, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u8))) +svuint8x3_t svld3q(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u32))) +svuint32x3_t svld3q(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u64))) +svuint64x3_t svld3q(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u16))) +svuint16x3_t svld3q(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s8))) +svint8x3_t svld3q(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f64))) +svfloat64x3_t svld3q(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f32))) +svfloat32x3_t svld3q(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f16))) +svfloat16x3_t svld3q(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s32))) +svint32x3_t svld3q(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s64))) +svint64x3_t svld3q(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s16))) +svint16x3_t svld3q(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_bf16))) +svbfloat16x3_t svld3q(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u8))) +svuint8x3_t svld3q_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u32))) +svuint32x3_t svld3q_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u64))) +svuint64x3_t svld3q_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u16))) +svuint16x3_t svld3q_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s8))) +svint8x3_t svld3q_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f64))) +svfloat64x3_t svld3q_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f32))) +svfloat32x3_t svld3q_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f16))) +svfloat16x3_t svld3q_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s32))) +svint32x3_t svld3q_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s64))) +svint64x3_t svld3q_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s16))) +svint16x3_t svld3q_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_bf16))) +svbfloat16x3_t svld3q_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u8))) +svuint8x4_t svld4q(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u32))) +svuint32x4_t svld4q(svbool_t, uint32_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u64))) +svuint64x4_t svld4q(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u16))) +svuint16x4_t svld4q(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s8))) +svint8x4_t svld4q(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f64))) +svfloat64x4_t svld4q(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f32))) +svfloat32x4_t svld4q(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f16))) +svfloat16x4_t svld4q(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s32))) +svint32x4_t svld4q(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s64))) +svint64x4_t svld4q(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s16))) +svint16x4_t svld4q(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_bf16))) +svbfloat16x4_t svld4q(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u8))) +svuint8x4_t svld4q_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u32))) +svuint32x4_t svld4q_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u64))) +svuint64x4_t svld4q_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u16))) +svuint16x4_t svld4q_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s8))) +svint8x4_t svld4q_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f64))) +svfloat64x4_t svld4q_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f32))) +svfloat32x4_t svld4q_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f16))) +svfloat16x4_t svld4q_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s32))) +svint32x4_t svld4q_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s64))) +svint64x4_t svld4q_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s16))) +svint16x4_t svld4q_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_bf16))) +svbfloat16x4_t svld4q_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f64))) +float64x2_t svmaxnmqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f32))) +float32x4_t svmaxnmqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f16))) +float16x8_t svmaxnmqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f64))) +float64x2_t svmaxqv(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f32))) +float32x4_t svmaxqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f16))) +float16x8_t svmaxqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s8))) +int8x16_t svmaxqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s32))) +int32x4_t svmaxqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s64))) +int64x2_t svmaxqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s16))) +int16x8_t svmaxqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u8))) +uint8x16_t svmaxqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u32))) +uint32x4_t svmaxqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u64))) +uint64x2_t svmaxqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u16))) +uint16x8_t svmaxqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f64))) +float64x2_t svminnmqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f32))) +float32x4_t svminnmqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f16))) +float16x8_t svminnmqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f64))) +float64x2_t svminqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f32))) +float32x4_t svminqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f16))) +float16x8_t svminqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s8))) +int8x16_t svminqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s32))) +int32x4_t svminqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s64))) +int64x2_t svminqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s16))) +int16x8_t svminqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u8))) +uint8x16_t svminqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u32))) +uint32x4_t svminqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u64))) +uint64x2_t svminqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u16))) +uint16x8_t svminqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u8))) +uint8x16_t svorqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u32))) +uint32x4_t svorqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u64))) +uint64x2_t svorqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u16))) +uint16x8_t svorqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s8))) +int8x16_t svorqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s32))) +int32x4_t 
svorqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s64))) +int64x2_t svorqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s16))) +int16x8_t svorqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8))) +svbool_t svpmov(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8))) +svbool_t svpmov(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64))) +svbool_t svpmov(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64))) +svbool_t svpmov(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16))) +svbool_t svpmov(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16))) +svbool_t svpmov(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32))) +svbool_t svpmov(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32))) +svbool_t svpmov(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u8))) +svbool_t svpmov_lane(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s8))) +svbool_t svpmov_lane(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64))) +svbool_t svpmov_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64))) +svbool_t svpmov_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16))) +svbool_t svpmov_lane(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16))) +svbool_t svpmov_lane(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32))) +svbool_t svpmov_lane(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32))) +svbool_t svpmov_lane(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64_m))) +svuint64_t svpmov_lane_m(svuint64_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64_m))) +svint64_t svpmov_lane_m(svint64_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16_m))) +svuint16_t svpmov_lane_m(svuint16_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16_m))) +svint16_t svpmov_lane_m(svint16_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32_m))) +svuint32_t svpmov_lane_m(svuint32_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32_m))) +svint32_t svpmov_lane_m(svint32_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_u64))) +void svst1dq(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_f64))) +void svst1dq(svbool_t, float64_t const *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_s64))) +void svst1dq(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_u64))) +void svst1dq_vnum(svbool_t, uint64_t const *, int64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_f64))) +void svst1dq_vnum(svbool_t, float64_t const *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_s64))) +void svst1dq_vnum(svbool_t, int64_t const *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u8))) +void svst1q_scatter(svbool_t, svuint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u32))) +void svst1q_scatter(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u64))) +void svst1q_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u16))) +void svst1q_scatter(svbool_t, svuint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_bf16))) +void svst1q_scatter(svbool_t, svuint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s8))) +void svst1q_scatter(svbool_t, svuint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f64))) +void svst1q_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f32))) +void svst1q_scatter(svbool_t, svuint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f16))) +void svst1q_scatter(svbool_t, svuint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s32))) +void svst1q_scatter(svbool_t, svuint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s64))) +void svst1q_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s16))) +void svst1q_scatter(svbool_t, svuint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u32))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u64))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_bf16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f64))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f32))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s32))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s64))) +void 
svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u8))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u32))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u64))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_bf16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s8))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f64))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f32))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s32))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s64))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u32))) +void svst1q_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u64))) +void svst1q_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u16))) +void svst1q_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_bf16))) +void svst1q_scatter_index(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f64))) +void svst1q_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f32))) +void svst1q_scatter_index(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f16))) +void svst1q_scatter_index(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s32))) 
+void svst1q_scatter_index(svbool_t, int32_t *, svuint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s64))) +void svst1q_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s16))) +void svst1q_scatter_index(svbool_t, int16_t *, svuint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u8))) +void svst1q_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u32))) +void svst1q_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u64))) +void svst1q_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u16))) +void svst1q_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_bf16))) +void svst1q_scatter_offset(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s8))) +void svst1q_scatter_offset(svbool_t, int8_t *, svuint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f64))) +void svst1q_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f32))) +void svst1q_scatter_offset(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f16))) +void svst1q_scatter_offset(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s32))) +void svst1q_scatter_offset(svbool_t, int32_t *, svuint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s64))) +void svst1q_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s16))) +void svst1q_scatter_offset(svbool_t, int16_t *, svuint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_u32))) +void svst1wq(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_f32))) +void svst1wq(svbool_t, float32_t const *, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_s32))) +void svst1wq(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_u32))) +void svst1wq_vnum(svbool_t, uint32_t const *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_f32))) +void svst1wq_vnum(svbool_t, float32_t const *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_s32))) +void svst1wq_vnum(svbool_t, int32_t const *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u8))) +void svst2q(svbool_t, uint8_t const *, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u32))) +void svst2q(svbool_t, uint32_t const *, svuint32x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u64))) +void svst2q(svbool_t, uint64_t const *, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u16))) +void svst2q(svbool_t, uint16_t const *, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s8))) +void svst2q(svbool_t, int8_t const *, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f64))) +void svst2q(svbool_t, float64_t const *, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f32))) +void svst2q(svbool_t, float32_t const *, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f16))) +void svst2q(svbool_t, float16_t const *, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s32))) +void svst2q(svbool_t, int32_t const *, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s64))) +void svst2q(svbool_t, int64_t const *, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s16))) +void svst2q(svbool_t, int16_t const *, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_bf16))) +void svst2q(svbool_t, bfloat16_t const *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u8))) +void svst2q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u32))) +void svst2q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u64))) +void svst2q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u16))) +void svst2q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s8))) +void svst2q_vnum(svbool_t, int8_t const *, int64_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f64))) +void svst2q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f32))) +void svst2q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f16))) +void svst2q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s32))) +void svst2q_vnum(svbool_t, int32_t const *, int64_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s64))) +void svst2q_vnum(svbool_t, int64_t const *, int64_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s16))) +void svst2q_vnum(svbool_t, int16_t const *, int64_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_bf16))) +void svst2q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u8))) +void svst3q(svbool_t, uint8_t const *, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u32))) +void svst3q(svbool_t, uint32_t const *, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u64))) +void svst3q(svbool_t, uint64_t const *, svuint64x3_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u16))) +void svst3q(svbool_t, uint16_t const *, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s8))) +void svst3q(svbool_t, int8_t const *, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f64))) +void svst3q(svbool_t, float64_t const *, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f32))) +void svst3q(svbool_t, float32_t const *, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f16))) +void svst3q(svbool_t, float16_t const *, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s32))) +void svst3q(svbool_t, int32_t const *, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s64))) +void svst3q(svbool_t, int64_t const *, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s16))) +void svst3q(svbool_t, int16_t const *, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_bf16))) +void svst3q(svbool_t, bfloat16_t const *, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u8))) +void svst3q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u32))) +void svst3q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u64))) +void svst3q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u16))) +void svst3q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s8))) +void svst3q_vnum(svbool_t, int8_t const *, int64_t, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f64))) +void svst3q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f32))) +void svst3q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f16))) +void svst3q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s32))) +void svst3q_vnum(svbool_t, int32_t const *, int64_t, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s64))) +void svst3q_vnum(svbool_t, int64_t const *, int64_t, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s16))) +void svst3q_vnum(svbool_t, int16_t const *, int64_t, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_bf16))) +void svst3q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u8))) +void svst4q(svbool_t, uint8_t const *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u32))) +void svst4q(svbool_t, uint32_t const *, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u64))) +void svst4q(svbool_t, uint64_t const *, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u16))) +void svst4q(svbool_t, uint16_t const *, svuint16x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s8))) +void svst4q(svbool_t, int8_t const *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f64))) +void svst4q(svbool_t, float64_t const *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f32))) +void svst4q(svbool_t, float32_t const *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f16))) +void svst4q(svbool_t, float16_t const *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s32))) +void svst4q(svbool_t, int32_t const *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s64))) +void svst4q(svbool_t, int64_t const *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s16))) +void svst4q(svbool_t, int16_t const *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_bf16))) +void svst4q(svbool_t, bfloat16_t const *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u8))) +void svst4q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u32))) +void svst4q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u64))) +void svst4q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u16))) +void svst4q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s8))) +void svst4q_vnum(svbool_t, int8_t const *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f64))) +void svst4q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f32))) +void svst4q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f16))) +void svst4q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s32))) +void svst4q_vnum(svbool_t, int32_t const *, int64_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s64))) +void svst4q_vnum(svbool_t, int64_t const *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s16))) +void svst4q_vnum(svbool_t, int16_t const *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_bf16))) +void svst4q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u8))) +svuint8_t svtblq(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u32))) +svuint32_t svtblq(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u64))) +svuint64_t svtblq(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u16))) +svuint16_t svtblq(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_bf16))) +svbfloat16_t svtblq(svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s8))) +svint8_t 
svtblq(svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f64))) +svfloat64_t svtblq(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f32))) +svfloat32_t svtblq(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f16))) +svfloat16_t svtblq(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s32))) +svint32_t svtblq(svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s64))) +svint64_t svtblq(svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s16))) +svint16_t svtblq(svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u8))) +svuint8_t svtbxq(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u32))) +svuint32_t svtbxq(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u64))) +svuint64_t svtbxq(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u16))) +svuint16_t svtbxq(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_bf16))) +svbfloat16_t svtbxq(svbfloat16_t, svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s8))) +svint8_t svtbxq(svint8_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f64))) +svfloat64_t svtbxq(svfloat64_t, svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f32))) +svfloat32_t svtbxq(svfloat32_t, svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f16))) +svfloat16_t svtbxq(svfloat16_t, svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s32))) +svint32_t svtbxq(svint32_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s64))) +svint64_t svtbxq(svint64_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s16))) +svint16_t svtbxq(svint16_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u8))) +svuint8_t svuzpq1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u32))) +svuint32_t svuzpq1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u64))) +svuint64_t svuzpq1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u16))) +svuint16_t svuzpq1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_bf16))) +svbfloat16_t svuzpq1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s8))) +svint8_t svuzpq1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f64))) +svfloat64_t svuzpq1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f32))) +svfloat32_t svuzpq1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f16))) +svfloat16_t svuzpq1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s32))) +svint32_t svuzpq1(svint32_t, svint32_t); 
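(Illustrative aside, not part of the upstream diff: the `__aio` declarations above are clang overloadable aliases, so the new 128-bit-segment permute intrinsics can be called by their short names from C as well as C++. A minimal sketch, assuming a toolchain flag such as `-march=armv9-a+sve2p1` and this LLVM 19 `arm_sve.h`:)

#include <arm_sve.h>

// Hypothetical helper: look up bytes within each 128-bit segment of
// `data` via the per-segment TBL declared above (svtblq), then take
// the even elements of each segment with the quadword UZP1 (svuzpq1).
svuint8_t segment_shuffle(svuint8_t data, svuint8_t idx) {
    svuint8_t t = svtblq(data, idx);  // overloaded __aio form of svtblq_u8
    return svuzpq1(t, t);             // even-indexed elements per segment
}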
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s64))) +svint64_t svuzpq1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s16))) +svint16_t svuzpq1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u8))) +svuint8_t svuzpq2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u32))) +svuint32_t svuzpq2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u64))) +svuint64_t svuzpq2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u16))) +svuint16_t svuzpq2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_bf16))) +svbfloat16_t svuzpq2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s8))) +svint8_t svuzpq2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f64))) +svfloat64_t svuzpq2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f32))) +svfloat32_t svuzpq2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f16))) +svfloat16_t svuzpq2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s32))) +svint32_t svuzpq2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s64))) +svint64_t svuzpq2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s16))) +svint16_t svuzpq2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u8))) +svuint8_t svzipq1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u32))) +svuint32_t svzipq1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u64))) +svuint64_t svzipq1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u16))) +svuint16_t svzipq1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_bf16))) +svbfloat16_t svzipq1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s8))) +svint8_t svzipq1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f64))) +svfloat64_t svzipq1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f32))) +svfloat32_t svzipq1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f16))) +svfloat16_t svzipq1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s32))) +svint32_t svzipq1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s64))) +svint64_t svzipq1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s16))) +svint16_t svzipq1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u8))) +svuint8_t svzipq2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u32))) +svuint32_t svzipq2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u64))) +svuint64_t svzipq2(svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u16))) +svuint16_t svzipq2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_bf16))) +svbfloat16_t svzipq2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s8))) +svint8_t svzipq2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f64))) +svfloat64_t svzipq2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f32))) +svfloat32_t svzipq2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f16))) +svfloat16_t svzipq2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s32))) +svint32_t svzipq2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64))) +svint64_t svzipq2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16))) +svint16_t svzipq2(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_bf16))) +svbfloat16_t svdup_laneq_bf16(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_bf16))) +svbfloat16_t svdup_laneq(svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) +svint8_t svclamp_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) +svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) +svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) +svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) +svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) +svuint32_t svclamp_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) +svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) +svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b16))) +svbool_t svpsel_lane_b16(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b32))) +svbool_t svpsel_lane_b32(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b64))) +svbool_t svpsel_lane_b64(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b8))) +svbool_t svpsel_lane_b8(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) +svuint8_t svrevd_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) +svuint32_t svrevd_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) +svuint64_t svrevd_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) +svuint16_t svrevd_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) +svbfloat16_t svrevd_bf16_m(svbfloat16_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) +svint8_t svrevd_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) +svfloat64_t svrevd_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) +svfloat32_t svrevd_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) +svfloat16_t svrevd_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) +svint32_t svrevd_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) +svint64_t svrevd_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) +svint16_t svrevd_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) +svuint8_t svrevd_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) +svuint32_t svrevd_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) +svuint64_t svrevd_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) +svuint16_t svrevd_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) +svbfloat16_t svrevd_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) +svint8_t svrevd_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) +svfloat64_t svrevd_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) +svfloat32_t svrevd_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) +svfloat16_t svrevd_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) +svint32_t svrevd_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) +svint64_t svrevd_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) +svint16_t svrevd_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) +svuint8_t svrevd_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) +svuint32_t svrevd_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) +svuint64_t svrevd_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) +svuint16_t svrevd_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) +svbfloat16_t svrevd_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) +svint8_t svrevd_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) +svfloat64_t svrevd_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) +svfloat32_t 
svrevd_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) +svfloat16_t svrevd_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) +svint32_t svrevd_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) +svint64_t svrevd_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) +svint16_t svrevd_s16_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) +svint8_t svclamp(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) +svint32_t svclamp(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) +svint64_t svclamp(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) +svint16_t svclamp(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) +svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) +svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) +svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) +svuint16_t svclamp(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) +svuint8_t svrevd_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) +svuint32_t svrevd_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) +svuint64_t svrevd_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) +svuint16_t svrevd_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) +svbfloat16_t svrevd_m(svbfloat16_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) +svint8_t svrevd_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) +svfloat64_t svrevd_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) +svfloat32_t svrevd_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) +svfloat16_t svrevd_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) +svint32_t svrevd_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) +svint64_t svrevd_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) +svint16_t svrevd_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) +svuint8_t svrevd_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) +svuint32_t svrevd_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) +svuint64_t svrevd_x(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) +svuint16_t svrevd_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) +svbfloat16_t svrevd_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) +svint8_t svrevd_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) +svfloat64_t svrevd_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) +svfloat32_t svrevd_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) +svfloat16_t svrevd_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) +svint32_t svrevd_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) +svint64_t svrevd_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) +svint16_t svrevd_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) +svuint8_t svrevd_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) +svuint32_t svrevd_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) +svuint64_t svrevd_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) +svuint16_t svrevd_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) +svbfloat16_t svrevd_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) +svint8_t svrevd_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) +svfloat64_t svrevd_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) +svfloat32_t svrevd_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) +svfloat16_t svrevd_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) +svint32_t svrevd_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) +svint64_t svrevd_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) +svint16_t svrevd_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32))) +svfloat32_t svbfmlslb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32))) +svfloat32_t svbfmlslb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_f32))) +svfloat32_t svbfmlslt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_lane_f32))) +svfloat32_t svbfmlslt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64))) +svfloat64_t svclamp_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32))) +svfloat32_t svclamp_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16))) +svfloat16_t 
svclamp_f16(svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c8))) +uint64_t svcntp_c8(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c32))) +uint64_t svcntp_c32(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c64))) +uint64_t svcntp_c64(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c16))) +uint64_t svcntp_c16(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b))) +svboolx2_t svcreate2_b(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b))) +svboolx4_t svcreate4_b(svbool_t, svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_f32_f16))) +svfloat32_t svdot_f32_f16(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32_s16))) +svint32_t svdot_s32_s16(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32_u16))) +svuint32_t svdot_u32_u16(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_f32_f16))) +svfloat32_t svdot_lane_f32_f16(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32_s16))) +svint32_t svdot_lane_s32_s16(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32_u16))) +svuint32_t svdot_lane_u32_u16(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_b))) +svbool_t svget2_b(svboolx2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_b))) +svbool_t svget4_b(svboolx4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x2))) +svuint8x2_t svld1_u8_x2(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x2))) +svint8x2_t svld1_s8_x2(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x2))) +svuint64x2_t svld1_u64_x2(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x2))) +svfloat64x2_t svld1_f64_x2(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x2))) +svint64x2_t svld1_s64_x2(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x2))) +svuint16x2_t svld1_u16_x2(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x2))) +svbfloat16x2_t svld1_bf16_x2(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x2))) +svfloat16x2_t svld1_f16_x2(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x2))) +svint16x2_t svld1_s16_x2(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x2))) +svuint32x2_t svld1_u32_x2(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x2))) +svfloat32x2_t svld1_f32_x2(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x2))) +svint32x2_t svld1_s32_x2(svcount_t, int32_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x4))) +svuint8x4_t svld1_u8_x4(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x4))) +svint8x4_t svld1_s8_x4(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x4))) +svuint64x4_t svld1_u64_x4(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x4))) +svfloat64x4_t svld1_f64_x4(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x4))) +svint64x4_t svld1_s64_x4(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x4))) +svuint16x4_t svld1_u16_x4(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x4))) +svbfloat16x4_t svld1_bf16_x4(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x4))) +svfloat16x4_t svld1_f16_x4(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x4))) +svint16x4_t svld1_s16_x4(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x4))) +svuint32x4_t svld1_u32_x4(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x4))) +svfloat32x4_t svld1_f32_x4(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x4))) +svint32x4_t svld1_s32_x4(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x2))) +svuint8x2_t svld1_vnum_u8_x2(svcount_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x2))) +svint8x2_t svld1_vnum_s8_x2(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x2))) +svuint64x2_t svld1_vnum_u64_x2(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x2))) +svfloat64x2_t svld1_vnum_f64_x2(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x2))) +svint64x2_t svld1_vnum_s64_x2(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x2))) +svuint16x2_t svld1_vnum_u16_x2(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x2))) +svbfloat16x2_t svld1_vnum_bf16_x2(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x2))) +svfloat16x2_t svld1_vnum_f16_x2(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x2))) +svint16x2_t svld1_vnum_s16_x2(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x2))) +svuint32x2_t svld1_vnum_u32_x2(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x2))) +svfloat32x2_t svld1_vnum_f32_x2(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x2))) +svint32x2_t svld1_vnum_s32_x2(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x4))) +svuint8x4_t svld1_vnum_u8_x4(svcount_t, 
uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x4))) +svint8x4_t svld1_vnum_s8_x4(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x4))) +svuint64x4_t svld1_vnum_u64_x4(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x4))) +svfloat64x4_t svld1_vnum_f64_x4(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x4))) +svint64x4_t svld1_vnum_s64_x4(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x4))) +svuint16x4_t svld1_vnum_u16_x4(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x4))) +svbfloat16x4_t svld1_vnum_bf16_x4(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x4))) +svfloat16x4_t svld1_vnum_f16_x4(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x4))) +svint16x4_t svld1_vnum_s16_x4(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x4))) +svuint32x4_t svld1_vnum_u32_x4(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x4))) +svfloat32x4_t svld1_vnum_f32_x4(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x4))) +svint32x4_t svld1_vnum_s32_x4(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x2))) +svuint8x2_t svldnt1_u8_x2(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x2))) +svint8x2_t svldnt1_s8_x2(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x2))) +svuint64x2_t svldnt1_u64_x2(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x2))) +svfloat64x2_t svldnt1_f64_x2(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x2))) +svint64x2_t svldnt1_s64_x2(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x2))) +svuint16x2_t svldnt1_u16_x2(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x2))) +svbfloat16x2_t svldnt1_bf16_x2(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x2))) +svfloat16x2_t svldnt1_f16_x2(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x2))) +svint16x2_t svldnt1_s16_x2(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x2))) +svuint32x2_t svldnt1_u32_x2(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x2))) +svfloat32x2_t svldnt1_f32_x2(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x2))) +svint32x2_t svldnt1_s32_x2(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x4))) +svuint8x4_t svldnt1_u8_x4(svcount_t, uint8_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x4))) +svint8x4_t svldnt1_s8_x4(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x4))) +svuint64x4_t svldnt1_u64_x4(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x4))) +svfloat64x4_t svldnt1_f64_x4(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x4))) +svint64x4_t svldnt1_s64_x4(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x4))) +svuint16x4_t svldnt1_u16_x4(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x4))) +svbfloat16x4_t svldnt1_bf16_x4(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x4))) +svfloat16x4_t svldnt1_f16_x4(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x4))) +svint16x4_t svldnt1_s16_x4(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x4))) +svuint32x4_t svldnt1_u32_x4(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x4))) +svfloat32x4_t svldnt1_f32_x4(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x4))) +svint32x4_t svldnt1_s32_x4(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x2))) +svuint8x2_t svldnt1_vnum_u8_x2(svcount_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x2))) +svint8x2_t svldnt1_vnum_s8_x2(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x2))) +svuint64x2_t svldnt1_vnum_u64_x2(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x2))) +svfloat64x2_t svldnt1_vnum_f64_x2(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x2))) +svint64x2_t svldnt1_vnum_s64_x2(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x2))) +svuint16x2_t svldnt1_vnum_u16_x2(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x2))) +svbfloat16x2_t svldnt1_vnum_bf16_x2(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x2))) +svfloat16x2_t svldnt1_vnum_f16_x2(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x2))) +svint16x2_t svldnt1_vnum_s16_x2(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x2))) +svuint32x2_t svldnt1_vnum_u32_x2(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x2))) +svfloat32x2_t svldnt1_vnum_f32_x2(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x2))) +svint32x2_t svldnt1_vnum_s32_x2(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x4))) +svuint8x4_t svldnt1_vnum_u8_x4(svcount_t, uint8_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x4))) +svint8x4_t svldnt1_vnum_s8_x4(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x4))) +svuint64x4_t svldnt1_vnum_u64_x4(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x4))) +svfloat64x4_t svldnt1_vnum_f64_x4(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x4))) +svint64x4_t svldnt1_vnum_s64_x4(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x4))) +svuint16x4_t svldnt1_vnum_u16_x4(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x4))) +svbfloat16x4_t svldnt1_vnum_bf16_x4(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x4))) +svfloat16x4_t svldnt1_vnum_f16_x4(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x4))) +svint16x4_t svldnt1_vnum_s16_x4(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x4))) +svuint32x4_t svldnt1_vnum_u32_x4(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4))) +svfloat32x4_t svldnt1_vnum_f32_x4(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4))) +svint32x4_t svldnt1_vnum_s32_x4(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c8))) +svbool_t svpext_lane_c8(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c32))) +svbool_t svpext_lane_c32(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c64))) +svbool_t svpext_lane_c64(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c16))) +svbool_t svpext_lane_c16(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c8_x2))) +svboolx2_t svpext_lane_c8_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c32_x2))) +svboolx2_t svpext_lane_c32_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c64_x2))) +svboolx2_t svpext_lane_c64_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c16_x2))) +svboolx2_t svpext_lane_c16_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_c))) +svcount_t svpfalse_c(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c16))) +svcount_t svpsel_lane_c16(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c32))) +svcount_t svpsel_lane_c32(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c64))) +svcount_t svpsel_lane_c64(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c8))) +svcount_t svpsel_lane_c8(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c8))) +svcount_t svptrue_c8(void); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c32))) +svcount_t svptrue_c32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c64))) +svcount_t svptrue_c64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c16))) +svcount_t svptrue_c16(void); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) svint16_t svqcvtn_s16_s32_x2(svint32x2_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) svuint16_t svqcvtn_u16_s32_x2(svint32x2_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) svuint16_t svqcvtn_u16_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) +svint16_t svqrshrn_n_s16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) +svuint16_t svqrshrn_n_u16_u32_x2(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) +svuint16_t svqrshrun_n_u16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) +svbool_t svreinterpret_b(svcount_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) +svcount_t svreinterpret_c(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) +svboolx2_t svset2_b(svboolx2_t, uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) +svboolx4_t svset4_b(svboolx4_t, uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x2))) +void svst1_u8_x2(svcount_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x2))) +void svst1_s8_x2(svcount_t, int8_t *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x2))) +void svst1_u64_x2(svcount_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x2))) +void svst1_f64_x2(svcount_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x2))) +void svst1_s64_x2(svcount_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x2))) +void svst1_u16_x2(svcount_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x2))) +void svst1_bf16_x2(svcount_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x2))) +void svst1_f16_x2(svcount_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x2))) +void svst1_s16_x2(svcount_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x2))) +void svst1_u32_x2(svcount_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x2))) +void svst1_f32_x2(svcount_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x2))) +void svst1_s32_x2(svcount_t, int32_t *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x4))) +void svst1_u8_x4(svcount_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x4))) +void svst1_s8_x4(svcount_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x4))) 
+void svst1_u64_x4(svcount_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x4))) +void svst1_f64_x4(svcount_t, float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x4))) +void svst1_s64_x4(svcount_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x4))) +void svst1_u16_x4(svcount_t, uint16_t *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x4))) +void svst1_bf16_x4(svcount_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x4))) +void svst1_f16_x4(svcount_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x4))) +void svst1_s16_x4(svcount_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x4))) +void svst1_u32_x4(svcount_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x4))) +void svst1_f32_x4(svcount_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x4))) +void svst1_s32_x4(svcount_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x2))) +void svst1_vnum_u8_x2(svcount_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x2))) +void svst1_vnum_s8_x2(svcount_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x2))) +void svst1_vnum_u64_x2(svcount_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x2))) +void svst1_vnum_f64_x2(svcount_t, float64_t *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x2))) +void svst1_vnum_s64_x2(svcount_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x2))) +void svst1_vnum_u16_x2(svcount_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x2))) +void svst1_vnum_bf16_x2(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x2))) +void svst1_vnum_f16_x2(svcount_t, float16_t *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x2))) +void svst1_vnum_s16_x2(svcount_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x2))) +void svst1_vnum_u32_x2(svcount_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x2))) +void svst1_vnum_f32_x2(svcount_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x2))) +void svst1_vnum_s32_x2(svcount_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x4))) +void svst1_vnum_u8_x4(svcount_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x4))) +void svst1_vnum_s8_x4(svcount_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x4))) +void svst1_vnum_u64_x4(svcount_t, uint64_t *, int64_t, svuint64x4_t); 
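(Illustrative aside, not part of the upstream diff: the `svcount_t`-predicated forms above pair a predicate-as-counter with x2/x4 register tuples. A minimal sketch using only declarations from this hunk, assuming SVE2.1 support:)

#include <arm_sve.h>

// Hypothetical helper: copy two vectors' worth of bytes using the
// predicate-as-counter multi-vector load/store pair declared above.
void copy_two_vectors(uint8_t *dst, const uint8_t *src) {
    svcount_t pn = svptrue_c8();           // all-true counter predicate
    svuint8x2_t v = svld1_u8_x2(pn, src);  // load two consecutive vectors
    svst1_u8_x2(pn, dst, v);               // store them back
}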
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x4))) +void svst1_vnum_f64_x4(svcount_t, float64_t *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x4))) +void svst1_vnum_s64_x4(svcount_t, int64_t *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x4))) +void svst1_vnum_u16_x4(svcount_t, uint16_t *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x4))) +void svst1_vnum_bf16_x4(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x4))) +void svst1_vnum_f16_x4(svcount_t, float16_t *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x4))) +void svst1_vnum_s16_x4(svcount_t, int16_t *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x4))) +void svst1_vnum_u32_x4(svcount_t, uint32_t *, int64_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x4))) +void svst1_vnum_f32_x4(svcount_t, float32_t *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x4))) +void svst1_vnum_s32_x4(svcount_t, int32_t *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x2))) +void svstnt1_u8_x2(svcount_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x2))) +void svstnt1_s8_x2(svcount_t, int8_t *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x2))) +void svstnt1_u64_x2(svcount_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x2))) +void svstnt1_f64_x2(svcount_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x2))) +void svstnt1_s64_x2(svcount_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x2))) +void svstnt1_u16_x2(svcount_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x2))) +void svstnt1_bf16_x2(svcount_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x2))) +void svstnt1_f16_x2(svcount_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x2))) +void svstnt1_s16_x2(svcount_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x2))) +void svstnt1_u32_x2(svcount_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x2))) +void svstnt1_f32_x2(svcount_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x2))) +void svstnt1_s32_x2(svcount_t, int32_t *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x4))) +void svstnt1_u8_x4(svcount_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x4))) +void svstnt1_s8_x4(svcount_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x4))) +void svstnt1_u64_x4(svcount_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x4))) +void svstnt1_f64_x4(svcount_t, 
float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x4))) +void svstnt1_s64_x4(svcount_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x4))) +void svstnt1_u16_x4(svcount_t, uint16_t *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x4))) +void svstnt1_bf16_x4(svcount_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x4))) +void svstnt1_f16_x4(svcount_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x4))) +void svstnt1_s16_x4(svcount_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x4))) +void svstnt1_u32_x4(svcount_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x4))) +void svstnt1_f32_x4(svcount_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x4))) +void svstnt1_s32_x4(svcount_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x2))) +void svstnt1_vnum_u8_x2(svcount_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x2))) +void svstnt1_vnum_s8_x2(svcount_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x2))) +void svstnt1_vnum_u64_x2(svcount_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x2))) +void svstnt1_vnum_f64_x2(svcount_t, float64_t *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x2))) +void svstnt1_vnum_s64_x2(svcount_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x2))) +void svstnt1_vnum_u16_x2(svcount_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x2))) +void svstnt1_vnum_bf16_x2(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x2))) +void svstnt1_vnum_f16_x2(svcount_t, float16_t *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x2))) +void svstnt1_vnum_s16_x2(svcount_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x2))) +void svstnt1_vnum_u32_x2(svcount_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x2))) +void svstnt1_vnum_f32_x2(svcount_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x2))) +void svstnt1_vnum_s32_x2(svcount_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x4))) +void svstnt1_vnum_u8_x4(svcount_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x4))) +void svstnt1_vnum_s8_x4(svcount_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x4))) +void svstnt1_vnum_u64_x4(svcount_t, uint64_t *, int64_t, svuint64x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x4)))
+void svstnt1_vnum_f64_x4(svcount_t, float64_t *, int64_t, svfloat64x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x4)))
+void svstnt1_vnum_s64_x4(svcount_t, int64_t *, int64_t, svint64x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x4)))
+void svstnt1_vnum_u16_x4(svcount_t, uint16_t *, int64_t, svuint16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x4)))
+void svstnt1_vnum_bf16_x4(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x4)))
+void svstnt1_vnum_f16_x4(svcount_t, float16_t *, int64_t, svfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x4)))
+void svstnt1_vnum_s16_x4(svcount_t, int16_t *, int64_t, svint16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x4)))
+void svstnt1_vnum_u32_x4(svcount_t, uint32_t *, int64_t, svuint32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x4)))
+void svstnt1_vnum_f32_x4(svcount_t, float32_t *, int64_t, svfloat32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x4)))
+void svstnt1_vnum_s32_x4(svcount_t, int32_t *, int64_t, svint32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_b)))
+svboolx2_t svundef2_b();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_b)))
+svboolx4_t svundef4_b();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_s64)))
+svcount_t svwhilege_c8_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_s64)))
+svcount_t svwhilege_c32_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_s64)))
+svcount_t svwhilege_c64_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_s64)))
+svcount_t svwhilege_c16_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_u64)))
+svcount_t svwhilege_c8_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_u64)))
+svcount_t svwhilege_c32_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_u64)))
+svcount_t svwhilege_c64_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_u64)))
+svcount_t svwhilege_c16_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64_x2)))
+svboolx2_t svwhilege_b8_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64_x2)))
+svboolx2_t svwhilege_b32_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64_x2)))
+svboolx2_t svwhilege_b64_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64_x2)))
+svboolx2_t svwhilege_b16_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64_x2)))
+svboolx2_t svwhilege_b8_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64_x2)))
+svboolx2_t svwhilege_b32_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64_x2)))
+svboolx2_t svwhilege_b64_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64_x2)))
+svboolx2_t svwhilege_b16_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_s64)))
+svcount_t svwhilegt_c8_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_s64)))
+svcount_t svwhilegt_c32_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_s64)))
+svcount_t svwhilegt_c64_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_s64)))
+svcount_t svwhilegt_c16_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_u64)))
+svcount_t svwhilegt_c8_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_u64)))
+svcount_t svwhilegt_c32_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_u64)))
+svcount_t svwhilegt_c64_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_u64)))
+svcount_t svwhilegt_c16_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64_x2)))
+svboolx2_t svwhilegt_b8_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64_x2)))
+svboolx2_t svwhilegt_b32_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64_x2)))
+svboolx2_t svwhilegt_b64_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64_x2)))
+svboolx2_t svwhilegt_b16_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64_x2)))
+svboolx2_t svwhilegt_b8_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64_x2)))
+svboolx2_t svwhilegt_b32_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64_x2)))
+svboolx2_t svwhilegt_b64_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64_x2)))
+svboolx2_t svwhilegt_b16_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_s64)))
+svcount_t svwhilele_c8_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_s64)))
+svcount_t svwhilele_c32_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_s64)))
+svcount_t svwhilele_c64_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_s64)))
+svcount_t svwhilele_c16_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_u64)))
+svcount_t svwhilele_c8_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_u64)))
+svcount_t svwhilele_c32_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_u64)))
+svcount_t svwhilele_c64_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_u64)))
+svcount_t svwhilele_c16_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64_x2)))
+svboolx2_t svwhilele_b8_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64_x2)))
+svboolx2_t svwhilele_b32_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64_x2)))
+svboolx2_t svwhilele_b64_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64_x2)))
+svboolx2_t svwhilele_b16_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64_x2)))
+svboolx2_t svwhilele_b8_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64_x2)))
+svboolx2_t svwhilele_b32_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64_x2)))
+svboolx2_t svwhilele_b64_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64_x2)))
+svboolx2_t svwhilele_b16_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_u64)))
+svcount_t svwhilelt_c8_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_u64)))
+svcount_t svwhilelt_c32_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_u64)))
+svcount_t svwhilelt_c64_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_u64)))
+svcount_t svwhilelt_c16_u64(uint64_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_s64)))
+svcount_t svwhilelt_c8_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_s64)))
+svcount_t svwhilelt_c32_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_s64)))
+svcount_t svwhilelt_c64_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_s64)))
+svcount_t svwhilelt_c16_s64(int64_t, int64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64_x2)))
+svboolx2_t svwhilelt_b8_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64_x2)))
+svboolx2_t svwhilelt_b32_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64_x2)))
+svboolx2_t svwhilelt_b64_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64_x2)))
+svboolx2_t svwhilelt_b16_u64_x2(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64_x2)))
+svboolx2_t svwhilelt_b8_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64_x2)))
+svboolx2_t svwhilelt_b32_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x2)))
+svboolx2_t svwhilelt_b64_s64_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2)))
+svboolx2_t svwhilelt_b16_s64_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32)))
+svfloat32_t svbfmlslb(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32)))
+svfloat32_t svbfmlslb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_f32)))
+svfloat32_t svbfmlslt(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_lane_f32)))
+svfloat32_t svbfmlslt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64)))
+svfloat64_t svclamp(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32)))
+svfloat32_t svclamp(svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16)))
+svfloat16_t svclamp(svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b)))
+svboolx2_t svcreate2(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b)))
+svboolx4_t svcreate4(svbool_t, svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_f32_f16)))
+svfloat32_t svdot(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32_s16)))
+svint32_t svdot(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32_u16)))
+svuint32_t svdot(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_f32_f16)))
+svfloat32_t svdot_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32_s16)))
+svint32_t svdot_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32_u16)))
+svuint32_t svdot_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_b)))
+svbool_t svget2(svboolx2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_b)))
+svbool_t svget4(svboolx4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x2)))
+svuint8x2_t svld1_x2(svcount_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x2)))
+svint8x2_t svld1_x2(svcount_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x2)))
+svuint64x2_t svld1_x2(svcount_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x2)))
+svfloat64x2_t svld1_x2(svcount_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x2)))
+svint64x2_t svld1_x2(svcount_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x2)))
+svuint16x2_t svld1_x2(svcount_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x2)))
+svbfloat16x2_t svld1_x2(svcount_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x2)))
+svfloat16x2_t svld1_x2(svcount_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x2)))
+svint16x2_t svld1_x2(svcount_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x2)))
+svuint32x2_t svld1_x2(svcount_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x2)))
+svfloat32x2_t svld1_x2(svcount_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x2)))
+svint32x2_t svld1_x2(svcount_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x4)))
+svuint8x4_t svld1_x4(svcount_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x4)))
+svint8x4_t svld1_x4(svcount_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x4)))
+svuint64x4_t svld1_x4(svcount_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x4)))
+svfloat64x4_t svld1_x4(svcount_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x4)))
+svint64x4_t svld1_x4(svcount_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x4)))
+svuint16x4_t svld1_x4(svcount_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x4)))
+svbfloat16x4_t svld1_x4(svcount_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x4)))
+svfloat16x4_t svld1_x4(svcount_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x4)))
+svint16x4_t svld1_x4(svcount_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x4)))
+svuint32x4_t svld1_x4(svcount_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x4)))
+svfloat32x4_t svld1_x4(svcount_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x4)))
+svint32x4_t svld1_x4(svcount_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x2)))
+svuint8x2_t svld1_vnum_x2(svcount_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x2)))
+svint8x2_t svld1_vnum_x2(svcount_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x2)))
+svuint64x2_t svld1_vnum_x2(svcount_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x2)))
+svfloat64x2_t svld1_vnum_x2(svcount_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x2)))
+svint64x2_t svld1_vnum_x2(svcount_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x2)))
+svuint16x2_t svld1_vnum_x2(svcount_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x2)))
+svbfloat16x2_t svld1_vnum_x2(svcount_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x2)))
+svfloat16x2_t svld1_vnum_x2(svcount_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x2)))
+svint16x2_t svld1_vnum_x2(svcount_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x2)))
+svuint32x2_t svld1_vnum_x2(svcount_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x2)))
+svfloat32x2_t svld1_vnum_x2(svcount_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x2)))
+svint32x2_t svld1_vnum_x2(svcount_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x4)))
+svuint8x4_t svld1_vnum_x4(svcount_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x4)))
+svint8x4_t svld1_vnum_x4(svcount_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x4)))
+svuint64x4_t svld1_vnum_x4(svcount_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x4)))
+svfloat64x4_t svld1_vnum_x4(svcount_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x4)))
+svint64x4_t svld1_vnum_x4(svcount_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x4)))
+svuint16x4_t svld1_vnum_x4(svcount_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x4)))
+svbfloat16x4_t svld1_vnum_x4(svcount_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x4)))
+svfloat16x4_t svld1_vnum_x4(svcount_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x4)))
+svint16x4_t svld1_vnum_x4(svcount_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x4)))
+svuint32x4_t svld1_vnum_x4(svcount_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x4)))
+svfloat32x4_t svld1_vnum_x4(svcount_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x4)))
+svint32x4_t svld1_vnum_x4(svcount_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x2)))
+svuint8x2_t svldnt1_x2(svcount_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x2)))
+svint8x2_t svldnt1_x2(svcount_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x2)))
+svuint64x2_t svldnt1_x2(svcount_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x2)))
+svfloat64x2_t svldnt1_x2(svcount_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x2)))
+svint64x2_t svldnt1_x2(svcount_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x2)))
+svuint16x2_t svldnt1_x2(svcount_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x2)))
+svbfloat16x2_t svldnt1_x2(svcount_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x2)))
+svfloat16x2_t svldnt1_x2(svcount_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x2)))
+svint16x2_t svldnt1_x2(svcount_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x2)))
+svuint32x2_t svldnt1_x2(svcount_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x2)))
+svfloat32x2_t svldnt1_x2(svcount_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x2)))
+svint32x2_t svldnt1_x2(svcount_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x4)))
+svuint8x4_t svldnt1_x4(svcount_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x4)))
+svint8x4_t svldnt1_x4(svcount_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x4)))
+svuint64x4_t svldnt1_x4(svcount_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x4)))
+svfloat64x4_t svldnt1_x4(svcount_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x4)))
+svint64x4_t svldnt1_x4(svcount_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x4)))
+svuint16x4_t svldnt1_x4(svcount_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x4)))
+svbfloat16x4_t svldnt1_x4(svcount_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x4)))
+svfloat16x4_t svldnt1_x4(svcount_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x4)))
+svint16x4_t svldnt1_x4(svcount_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x4)))
+svuint32x4_t svldnt1_x4(svcount_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x4)))
+svfloat32x4_t svldnt1_x4(svcount_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x4)))
+svint32x4_t svldnt1_x4(svcount_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x2)))
+svuint8x2_t svldnt1_vnum_x2(svcount_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x2)))
+svint8x2_t svldnt1_vnum_x2(svcount_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x2)))
+svuint64x2_t svldnt1_vnum_x2(svcount_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x2)))
+svfloat64x2_t svldnt1_vnum_x2(svcount_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x2)))
+svint64x2_t svldnt1_vnum_x2(svcount_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x2)))
+svuint16x2_t svldnt1_vnum_x2(svcount_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x2)))
+svbfloat16x2_t svldnt1_vnum_x2(svcount_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x2)))
+svfloat16x2_t svldnt1_vnum_x2(svcount_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x2)))
+svint16x2_t svldnt1_vnum_x2(svcount_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x2)))
+svuint32x2_t svldnt1_vnum_x2(svcount_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x2)))
+svfloat32x2_t svldnt1_vnum_x2(svcount_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x2)))
+svint32x2_t svldnt1_vnum_x2(svcount_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x4)))
+svuint8x4_t svldnt1_vnum_x4(svcount_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x4)))
+svint8x4_t svldnt1_vnum_x4(svcount_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x4)))
+svuint64x4_t svldnt1_vnum_x4(svcount_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x4)))
+svfloat64x4_t svldnt1_vnum_x4(svcount_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x4)))
+svint64x4_t svldnt1_vnum_x4(svcount_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x4)))
+svuint16x4_t svldnt1_vnum_x4(svcount_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x4)))
+svbfloat16x4_t svldnt1_vnum_x4(svcount_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x4)))
+svfloat16x4_t svldnt1_vnum_x4(svcount_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x4)))
+svint16x4_t svldnt1_vnum_x4(svcount_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x4)))
+svuint32x4_t svldnt1_vnum_x4(svcount_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4)))
+svfloat32x4_t svldnt1_vnum_x4(svcount_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4)))
+svint32x4_t svldnt1_vnum_x4(svcount_t, int32_t const *, int64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2)))
 svint16_t svqcvtn_s16(svint32x2_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2)))
 svuint16_t svqcvtn_u16(svint32x2_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2)))
 svuint16_t svqcvtn_u16(svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2)))
+svint16_t svqrshrn_s16(svint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2)))
+svuint16_t svqrshrn_u16(svuint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2)))
+svuint16_t svqrshrun_u16(svint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b)))
+svbool_t svreinterpret(svcount_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c)))
+svcount_t svreinterpret(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b)))
+svboolx2_t svset2(svboolx2_t, uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b)))
+svboolx4_t svset4(svboolx4_t, uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x2)))
+void svst1(svcount_t, uint8_t *, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x2)))
+void svst1(svcount_t, int8_t *, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x2)))
+void svst1(svcount_t, uint64_t *, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x2)))
+void svst1(svcount_t, float64_t *, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x2)))
+void svst1(svcount_t, int64_t *, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x2)))
+void svst1(svcount_t, uint16_t *, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x2)))
+void svst1(svcount_t, bfloat16_t *, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x2)))
+void svst1(svcount_t, float16_t *, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x2)))
+void svst1(svcount_t, int16_t *, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x2)))
+void svst1(svcount_t, uint32_t *, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x2)))
+void svst1(svcount_t, float32_t *, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x2)))
+void svst1(svcount_t, int32_t *, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x4)))
+void svst1(svcount_t, uint8_t *, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x4)))
+void svst1(svcount_t, int8_t *, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x4)))
+void svst1(svcount_t, uint64_t *, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x4)))
+void svst1(svcount_t, float64_t *, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x4)))
+void svst1(svcount_t, int64_t *, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x4)))
+void svst1(svcount_t, uint16_t *, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x4)))
+void svst1(svcount_t, bfloat16_t *, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x4)))
+void svst1(svcount_t, float16_t *, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x4)))
+void svst1(svcount_t, int16_t *, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x4)))
+void svst1(svcount_t, uint32_t *, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x4)))
+void svst1(svcount_t, float32_t *, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x4)))
+void svst1(svcount_t, int32_t *, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x2)))
+void svst1_vnum(svcount_t, uint8_t *, int64_t, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x2)))
+void svst1_vnum(svcount_t, int8_t *, int64_t, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x2)))
+void svst1_vnum(svcount_t, uint64_t *, int64_t, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x2)))
+void svst1_vnum(svcount_t, float64_t *, int64_t, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x2)))
+void svst1_vnum(svcount_t, int64_t *, int64_t, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x2)))
+void svst1_vnum(svcount_t, uint16_t *, int64_t, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x2)))
+void svst1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x2)))
+void svst1_vnum(svcount_t, float16_t *, int64_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x2)))
+void svst1_vnum(svcount_t, int16_t *, int64_t, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x2)))
+void svst1_vnum(svcount_t, uint32_t *, int64_t, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x2)))
+void svst1_vnum(svcount_t, float32_t *, int64_t, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x2)))
+void svst1_vnum(svcount_t, int32_t *, int64_t, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x4)))
+void svst1_vnum(svcount_t, uint8_t *, int64_t, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x4)))
+void svst1_vnum(svcount_t, int8_t *, int64_t, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x4)))
+void svst1_vnum(svcount_t, uint64_t *, int64_t, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x4)))
+void svst1_vnum(svcount_t, float64_t *, int64_t, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x4)))
+void svst1_vnum(svcount_t, int64_t *, int64_t, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x4)))
+void svst1_vnum(svcount_t, uint16_t *, int64_t, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x4)))
+void svst1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x4)))
+void svst1_vnum(svcount_t, float16_t *, int64_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x4)))
+void svst1_vnum(svcount_t, int16_t *, int64_t, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x4)))
+void svst1_vnum(svcount_t, uint32_t *, int64_t, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x4)))
+void svst1_vnum(svcount_t, float32_t *, int64_t, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x4)))
+void svst1_vnum(svcount_t, int32_t *, int64_t, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x2)))
+void svstnt1(svcount_t, uint8_t *, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x2)))
+void svstnt1(svcount_t, int8_t *, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x2)))
+void svstnt1(svcount_t, uint64_t *, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x2)))
+void svstnt1(svcount_t, float64_t *, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x2)))
+void svstnt1(svcount_t, int64_t *, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x2)))
+void svstnt1(svcount_t, uint16_t *, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x2)))
+void svstnt1(svcount_t, bfloat16_t *, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x2)))
+void svstnt1(svcount_t, float16_t *, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x2)))
+void svstnt1(svcount_t, int16_t *, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x2)))
+void svstnt1(svcount_t, uint32_t *, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x2)))
+void svstnt1(svcount_t, float32_t *, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x2)))
+void svstnt1(svcount_t, int32_t *, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x4)))
+void svstnt1(svcount_t, uint8_t *, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x4)))
+void svstnt1(svcount_t, int8_t *, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x4)))
+void svstnt1(svcount_t, uint64_t *, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x4)))
+void svstnt1(svcount_t, float64_t *, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x4)))
+void svstnt1(svcount_t, int64_t *, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x4)))
+void svstnt1(svcount_t, uint16_t *, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x4)))
+void svstnt1(svcount_t, bfloat16_t *, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x4)))
+void svstnt1(svcount_t, float16_t *, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x4)))
+void svstnt1(svcount_t, int16_t *, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x4)))
+void svstnt1(svcount_t, uint32_t *, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x4)))
+void svstnt1(svcount_t, float32_t *, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x4)))
+void svstnt1(svcount_t, int32_t *, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x2)))
+void svstnt1_vnum(svcount_t, uint8_t *, int64_t, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x2)))
+void svstnt1_vnum(svcount_t, int8_t *, int64_t, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x2)))
+void svstnt1_vnum(svcount_t, uint64_t *, int64_t, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x2)))
+void svstnt1_vnum(svcount_t, float64_t *, int64_t, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x2)))
+void svstnt1_vnum(svcount_t, int64_t *, int64_t, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x2)))
+void svstnt1_vnum(svcount_t, uint16_t *, int64_t, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x2)))
+void svstnt1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x2)))
+void svstnt1_vnum(svcount_t, float16_t *, int64_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x2)))
+void svstnt1_vnum(svcount_t, int16_t *, int64_t, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x2)))
+void svstnt1_vnum(svcount_t, uint32_t *, int64_t, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x2)))
+void svstnt1_vnum(svcount_t, float32_t *, int64_t, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x2)))
+void svstnt1_vnum(svcount_t, int32_t *, int64_t, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x4)))
+void svstnt1_vnum(svcount_t, uint8_t *, int64_t, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x4)))
+void svstnt1_vnum(svcount_t, int8_t *, int64_t, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x4)))
+void svstnt1_vnum(svcount_t, uint64_t *, int64_t, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x4)))
+void svstnt1_vnum(svcount_t, float64_t *, int64_t, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x4)))
+void svstnt1_vnum(svcount_t, int64_t *, int64_t, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x4)))
+void svstnt1_vnum(svcount_t, uint16_t *, int64_t, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x4)))
+void svstnt1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x4)))
+void svstnt1_vnum(svcount_t, float16_t *, int64_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x4)))
+void svstnt1_vnum(svcount_t, int16_t *, int64_t, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x4)))
+void svstnt1_vnum(svcount_t, uint32_t *, int64_t, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x4)))
+void svstnt1_vnum(svcount_t, float32_t *, int64_t, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x4)))
+void svstnt1_vnum(svcount_t, int32_t *, int64_t, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_s64)))
+svcount_t svwhilege_c8(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_s64)))
+svcount_t svwhilege_c32(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_s64)))
+svcount_t svwhilege_c64(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_s64)))
+svcount_t svwhilege_c16(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_u64)))
+svcount_t svwhilege_c8(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_u64)))
+svcount_t svwhilege_c32(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_u64)))
+svcount_t svwhilege_c64(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_u64)))
+svcount_t svwhilege_c16(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64_x2)))
+svboolx2_t svwhilege_b8_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64_x2)))
+svboolx2_t svwhilege_b32_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64_x2)))
+svboolx2_t svwhilege_b64_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64_x2)))
+svboolx2_t svwhilege_b16_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64_x2)))
+svboolx2_t svwhilege_b8_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64_x2)))
+svboolx2_t svwhilege_b32_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64_x2)))
+svboolx2_t svwhilege_b64_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64_x2)))
+svboolx2_t svwhilege_b16_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_s64)))
+svcount_t svwhilegt_c8(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_s64)))
+svcount_t svwhilegt_c32(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_s64)))
+svcount_t svwhilegt_c64(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_s64)))
+svcount_t svwhilegt_c16(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_u64)))
+svcount_t svwhilegt_c8(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_u64)))
+svcount_t svwhilegt_c32(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_u64)))
+svcount_t svwhilegt_c64(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_u64)))
+svcount_t svwhilegt_c16(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64_x2)))
+svboolx2_t svwhilegt_b8_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64_x2)))
+svboolx2_t svwhilegt_b32_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64_x2)))
+svboolx2_t svwhilegt_b64_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64_x2)))
+svboolx2_t svwhilegt_b16_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64_x2)))
+svboolx2_t svwhilegt_b8_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64_x2)))
+svboolx2_t svwhilegt_b32_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64_x2)))
+svboolx2_t svwhilegt_b64_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64_x2)))
+svboolx2_t svwhilegt_b16_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_s64)))
+svcount_t svwhilele_c8(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_s64)))
+svcount_t svwhilele_c32(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_s64)))
+svcount_t svwhilele_c64(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_s64)))
+svcount_t svwhilele_c16(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_u64)))
+svcount_t svwhilele_c8(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_u64)))
+svcount_t svwhilele_c32(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_u64)))
+svcount_t svwhilele_c64(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_u64)))
+svcount_t svwhilele_c16(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64_x2)))
+svboolx2_t svwhilele_b8_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64_x2)))
+svboolx2_t svwhilele_b32_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64_x2)))
+svboolx2_t svwhilele_b64_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64_x2)))
+svboolx2_t svwhilele_b16_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64_x2)))
+svboolx2_t svwhilele_b8_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64_x2)))
+svboolx2_t svwhilele_b32_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64_x2)))
+svboolx2_t svwhilele_b64_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64_x2)))
+svboolx2_t svwhilele_b16_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_u64)))
+svcount_t svwhilelt_c8(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_u64)))
+svcount_t svwhilelt_c32(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_u64)))
+svcount_t svwhilelt_c64(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_u64)))
+svcount_t svwhilelt_c16(uint64_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_s64)))
+svcount_t svwhilelt_c8(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_s64)))
+svcount_t svwhilelt_c32(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_s64)))
+svcount_t svwhilelt_c64(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_s64)))
+svcount_t svwhilelt_c16(int64_t, int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64_x2)))
+svboolx2_t svwhilelt_b8_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64_x2)))
+svboolx2_t svwhilelt_b32_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64_x2)))
+svboolx2_t svwhilelt_b64_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64_x2)))
+svboolx2_t svwhilelt_b16_x2(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64_x2)))
+svboolx2_t svwhilelt_b8_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64_x2)))
+svboolx2_t svwhilelt_b32_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x2)))
+svboolx2_t svwhilelt_b64_x2(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2)))
+svboolx2_t svwhilelt_b16_x2(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u8)))
+svuint8_t svdup_laneq_u8(svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s8)))
+svint8_t svdup_laneq_s8(svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u64)))
+svuint64_t svdup_laneq_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f64)))
+svfloat64_t svdup_laneq_f64(svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s64)))
+svint64_t svdup_laneq_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u16)))
+svuint16_t svdup_laneq_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f16)))
+svfloat16_t svdup_laneq_f16(svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s16)))
+svint16_t svdup_laneq_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u32)))
+svuint32_t svdup_laneq_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f32)))
+svfloat32_t svdup_laneq_f32(svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s32)))
+svint32_t svdup_laneq_s32(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u8)))
+svuint8_t svdup_laneq(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s8)))
+svint8_t svdup_laneq(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u64)))
+svuint64_t svdup_laneq(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f64)))
+svfloat64_t svdup_laneq(svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s64)))
+svint64_t svdup_laneq(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u16)))
+svuint16_t svdup_laneq(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f16)))
+svfloat16_t svdup_laneq(svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s16)))
+svint16_t svdup_laneq(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u32)))
+svuint32_t svdup_laneq(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f32)))
+svfloat32_t svdup_laneq(svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s32)))
+svint32_t svdup_laneq(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
+svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
+svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
+svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
+svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
+svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
+svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
+svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
+svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
+svint8_t svaba_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
+svint32_t svaba_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
+svint64_t svaba_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
+svint16_t svaba_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
+svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
+svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
+svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
+svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
+svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
+svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
+svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
+svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
+svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
+svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
+svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
+svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
+svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
+svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
+svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
+svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
+svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
+svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
+svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
+svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
+svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
+svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
+svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
+svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
+svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
+svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
+svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
+svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
+svint32_t svabdlb_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
+svint64_t svabdlb_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
+svint16_t svabdlb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
+svuint32_t svabdlb_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
+svuint64_t svabdlb_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
+svuint16_t svabdlb_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
+svint32_t svabdlb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
+svint64_t svabdlb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
+svint16_t svabdlb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
+svuint32_t svabdlb_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
+svuint64_t svabdlb_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
+svuint16_t svabdlb_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
+svint32_t svabdlt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
+svint64_t svabdlt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
+svint16_t svabdlt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
+svuint32_t svabdlt_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
+svuint64_t svabdlt_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
+svuint16_t svabdlt_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
+svint32_t svabdlt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
+svint64_t svabdlt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
+svint16_t svabdlt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
+svuint32_t svabdlt_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
+svuint64_t svabdlt_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
+svuint16_t svabdlt_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
+svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
+svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
+svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
+svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
+svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
+svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
+svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
+svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
+svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
+svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
+svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
+svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
+svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
+svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
+svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
+svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))
+svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))
+svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))
+svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))
+svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))
+svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))
+svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))
+svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))
+svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))
+svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))
+svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))
+svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))
+svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))
+svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))
+svint16_t svaddhnb_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))
+svint32_t svaddhnb_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))
+svint8_t svaddhnb_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))
+svuint16_t svaddhnb_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))
+svuint32_t svaddhnb_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))
+svuint8_t svaddhnb_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))
+svint16_t svaddhnb_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))
+svint32_t svaddhnb_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))
+svint8_t svaddhnb_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))
+svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))
+svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))
+svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))
+svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))
+svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))
+svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))
+svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))
+svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))
+svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))
+svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))
+svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))
+svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))
+svint32_t svaddlb_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))
+svint64_t svaddlb_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))
+svint16_t svaddlb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))
+svuint32_t svaddlb_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))
+svuint64_t svaddlb_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))
+svuint16_t svaddlb_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))
+svint32_t svaddlb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))
+svint64_t svaddlb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))
+svint16_t svaddlb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))
+svuint32_t svaddlb_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))
+svuint64_t svaddlb_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))
+svuint16_t svaddlb_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))
+svint32_t svaddlbt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))
+svint64_t svaddlbt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))
+svint16_t svaddlbt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))
+svint32_t svaddlbt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))
+svint64_t svaddlbt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))
+svint16_t svaddlbt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))
+svint32_t svaddlt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))
+svint64_t svaddlt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))
+svint16_t svaddlt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))
+svuint32_t svaddlt_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))
+svuint64_t svaddlt_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))
+svuint16_t svaddlt_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))
+svint32_t svaddlt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))
+svint64_t svaddlt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))
+svint16_t svaddlt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))
+svuint32_t svaddlt_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))
+svuint64_t svaddlt_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))
+svuint16_t svaddlt_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))
+svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))
+svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))
+svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))
+svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))
+svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))
+svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))
+svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))
+svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))
+svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))
+svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))
+svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))
+svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))
+svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))
+svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))
+svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))
+svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))
+svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))
+svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))
+svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))
+svint32_t svaddp_s32_x(svbool_t,
svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) +svuint64_t svaddwb_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) +svuint16_t svaddwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t svaddwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) +svint32_t svaddwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t svaddwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) +svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) 
+svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))
+svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))
+svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))
+svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))
+svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))
+svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))
+svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))
+svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))
+svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))
+svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))
+svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))
+svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
+svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
+svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
+svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
+svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
+svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
+svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
+svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
+svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
+svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
+svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
+svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
+svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
+svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
+svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
+svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
+svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
+svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
+svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
+svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
+svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
+svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
+svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
+svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
+svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
+svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
+svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
+svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
+svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
+svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
+svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
+svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
+svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
+svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
+svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
+svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
+svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
+svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
+svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
+svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
+svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
+svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
+svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
+svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
+svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
+svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
+svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
+svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
+svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
+svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
+svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
+svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
+svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
+svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
+svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
+svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
+svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
+svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
+svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
+svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
+svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
+svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
+svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
+svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
+svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
+svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
+svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
+svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
+svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
+svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
+svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
+svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
+svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
+svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
+svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
+svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
+svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
+svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
+svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
+svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
+svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
+svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
+svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
+svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
+svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
+svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
+svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
+svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
+svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
+svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
+svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
+svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
+svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
+svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
+svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
+svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
+svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
+svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
+svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
+svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
+svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
+svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
+svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
+svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
+svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
+svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
+svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
+svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
+svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
+svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
+svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
+svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
+svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
+svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
+svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
+svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
+svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
+svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
+svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
+svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
+svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
+svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
+svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
+svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
+svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
+svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
+svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
+svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
+svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
+svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
+svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
+svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
+svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
+svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
+svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
+svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
+svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
+svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
+svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
+svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
+svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
+svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
+svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
+svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
+svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
+svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
+svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
+svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
+svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
+svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
+svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
+svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
+svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
+svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
+svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
+svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
+svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
+svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
+svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
+svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
+svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
+svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
+svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
+svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
+svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
+svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
+svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
+svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
+svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
+svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
+svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
+svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
+svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
+svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
+svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
+svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
+svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
+svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
+svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
+svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
+svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
+svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
+svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
+svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
+svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
+svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
+svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
+svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
+svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
+svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
+svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
+svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
+svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
+svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
+svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
+svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
+svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
+svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
+svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
+svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
+svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
+svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
+svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
+svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
+svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
+svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
+svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
+svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
+svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
+svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
+svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
+svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
+svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
+svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
+svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
+svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
+svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
+svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
+svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
+svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
+svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
+svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
+svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
+svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
+svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
+svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
+svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
+svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
+svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
+svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
+svint16_t svhsubr_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
+svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
+svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
+svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
+svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
+svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
+svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
+svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
+svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
+svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
+svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
+svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
+svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
+svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
+svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
+svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
+svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
+svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
+svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
+svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
+svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
+svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
+svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
+svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
+svint64_t svlogb_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
+svint32_t svlogb_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
+svint16_t svlogb_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
+svint64_t svlogb_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
+svint32_t svlogb_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
+svint16_t svlogb_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
+svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
+svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
+svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
+svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
+svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
+svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
+svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
+svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
+svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
+svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
+svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
+svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
+svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
+svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
+svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
+svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
+svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
+svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
+svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
+svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
+svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
+svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
+svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
+svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
+svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
+svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
+svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
+svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
+svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
+svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
+svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
+svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
+svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
+svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
+svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
+svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
+svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
+svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
+svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
+svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
+svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
+svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
+svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
+svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
+svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
+svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
+svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
+svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
+svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
+svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
+svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
+svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
+svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
+svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
+svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
+svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
+svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
+svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
+svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
+svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
+svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
+svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
+svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
+svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
+svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
+svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
+svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
+svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
+svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
+svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
+svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
+svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
+svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
+svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
+svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
+svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
+svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
+svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
+svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
+svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
+svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
+svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
+svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
+svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
+svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
+svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
+svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
+svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
+svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
+svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
+svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
+svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
+svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
+svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
+svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
+svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
+svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
+svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
+svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
+svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
+svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
+svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
+svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
+svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
+svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
+svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
+svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
+svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
+svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
+svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
+svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
+svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
+svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
+svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
+svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
+svint64_t 
svmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) +svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) +svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) +svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) +svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) +svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) +svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) +svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) +svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) +svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) +svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) +svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) +svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) +svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) +svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) +svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) +svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) +svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) +svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) +svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) +svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) +svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) +svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) +svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) +svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) +svint32_t 
svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) +svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) +svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) +svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) +svint32_t svmovlb_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) +svint64_t svmovlb_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) +svint16_t svmovlb_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) +svuint32_t svmovlb_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) +svuint64_t svmovlb_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) +svuint16_t svmovlb_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) +svint32_t svmovlt_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) +svint64_t svmovlt_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) +svint16_t svmovlt_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) +svuint32_t svmovlt_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) +svuint64_t svmovlt_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) +svuint16_t svmovlt_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) +svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) +svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) +svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) +svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) +svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) +svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) +svint32_t svmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) +svint64_t svmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) +svint16_t svmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) +svuint32_t svmullb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) +svuint64_t svmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) +svuint16_t svmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) +svint32_t 
svmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) +svint64_t svmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) +svint16_t svmullb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) +svuint32_t svmullb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) +svuint64_t svmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) +svuint16_t svmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) +svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) +svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) +svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) +svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) +svint32_t svmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) +svint64_t svmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) +svint16_t svmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) +svuint32_t svmullt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) +svuint64_t svmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) +svuint16_t svmullt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) +svint32_t svmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) +svint64_t svmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) +svint16_t svmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) +svuint32_t svmullt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) +svuint64_t svmullt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) +svuint16_t svmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) +svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) +svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) +svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) +svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) +svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) +svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) +svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) +svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) +svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) +svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) +svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) +svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) +svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) +svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) +svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) +svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) +svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) +svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) +svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) +svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) +svuint8_t svpmul_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) +svuint8_t svpmul_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) +svuint64_t svpmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) +svuint16_t svpmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) +svuint64_t svpmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) +svuint16_t svpmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) +svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) +svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) +svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) +svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) +svuint64_t svpmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) +svuint16_t svpmullt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) +svuint64_t svpmullt_u64(svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) +svuint16_t svpmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) +svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) +svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) +svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) +svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) +svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) +svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) +svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) +svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) +svint8_t svqabs_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) +svint32_t svqabs_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) +svint64_t svqabs_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) +svint16_t svqabs_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) +svint8_t svqabs_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) +svint32_t svqabs_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) +svint64_t svqabs_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) +svint16_t svqabs_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) +svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) +svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) +svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) +svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) +svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) +svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) +svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) +svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) +svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) +svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) +svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) +svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) +svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) +svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) +svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) +svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) +svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) +svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) +svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) +svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) +svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) +svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) +svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) +svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) +svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) +svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) +svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) +svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) +svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) +svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) +svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) +svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) +svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) +svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) +svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) +svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) +svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) +svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) +svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) +svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) +svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) +svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) +svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) +svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) +svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) +svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) +svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) +svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) +svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) +svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) +svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) +svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) +svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) +svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) +svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) +svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) +svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) +svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) +svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) +svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) +svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) +svint64_t svqdmlalbt_n_s64(svint64_t, 
svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) +svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) +svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) +svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) +svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) +svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) +svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) +svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) +svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) +svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) +svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) +svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) +svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) +svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) +svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) +svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) +svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) +svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) +svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) +svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) +svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) +svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) +svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) +svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) +svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) +svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) +svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) +svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) +svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) +svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) +svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) +svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) +svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) +svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) +svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) +svint8_t svqdmulh_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) +svint32_t svqdmulh_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) +svint64_t svqdmulh_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) +svint16_t svqdmulh_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) +svint8_t svqdmulh_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) +svint32_t svqdmulh_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) +svint64_t svqdmulh_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) +svint16_t svqdmulh_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) +svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) +svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) +svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) +svint32_t svqdmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) +svint64_t svqdmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) +svint16_t svqdmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) +svint32_t svqdmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) +svint64_t svqdmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) +svint16_t svqdmullb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) +svint32_t 
svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) +svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) +svint32_t svqdmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) +svint64_t svqdmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) +svint16_t svqdmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) +svint32_t svqdmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) +svint64_t svqdmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) +svint16_t svqdmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) +svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) +svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) +svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) +svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) +svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) +svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) +svint8_t svqneg_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) +svint32_t svqneg_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) +svint64_t svqneg_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) +svint16_t svqneg_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) +svint8_t svqneg_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) +svint32_t svqneg_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) +svint64_t svqneg_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) +svint16_t svqneg_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) +svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) +svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) +svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) +svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) +svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) +svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) +svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) +svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) +svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) +svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) +svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) +svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) +svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) +svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) +svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) +svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) +svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) +svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) +svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) +svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) +svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) +svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) +svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) +svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) +svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) +svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) +svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) +svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) +svint8_t svqrdmulh_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) +svint32_t svqrdmulh_n_s32(svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) +svint64_t svqrdmulh_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) +svint16_t svqrdmulh_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) +svint8_t svqrdmulh_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) +svint32_t svqrdmulh_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) +svint64_t svqrdmulh_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) +svint16_t svqrdmulh_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) +svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) +svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) +svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) +svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) +svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) +svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) +svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) +svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) +svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) +svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) +svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) +svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) +svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) +svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) +svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) +svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) +svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) +svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) +svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) +svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) +svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) +svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) +svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) +svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) +svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) +svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) +svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) +svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) +svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) +svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) +svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) +svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) +svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) +svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) +svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) +svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) +svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) +svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) +svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) +svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) +svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) +svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) +svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) +svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) +svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) +svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
+svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
+svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
+svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
+svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
+svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
+svint16_t svqrshrnb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
+svint32_t svqrshrnb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
+svint8_t svqrshrnb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
+svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
+svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
+svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
+svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
+svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
+svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
+svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
+svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
+svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
+svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
+svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
+svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
+svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
+svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
+svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
+svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
+svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
+svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
+svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
+svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
+svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
+svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
+svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
+svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
+svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
+svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
+svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
+svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
+svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
+svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
+svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
+svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
+svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
+svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
+svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
+svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
+svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
+svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
+svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
+svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
+svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
+svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
+svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
+svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
+svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
+svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
+svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
+svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
+svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
+svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
+svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
+svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
+svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
+svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
+svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
+svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
+svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
+svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
+svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
+svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
+svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
+svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
+svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
+svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
+svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
+svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
+svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
+svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
+svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
+svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
+svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
+svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
+svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
+svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
+svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
+svint16_t svqshrnb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
+svint32_t svqshrnb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
+svint8_t svqshrnb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
+svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
+svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
+svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
+svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
+svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
+svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
+svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
+svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
+svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
+svuint16_t svqshrunb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
+svuint32_t svqshrunb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
+svuint8_t svqshrunb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
+svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
+svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
+svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
+svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
+svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
+svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
+svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
+svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
+svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
+svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
+svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
+svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
+svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
+svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
+svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
+svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
+svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
+svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
+svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
+svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
+svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
+svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
+svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
+svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
+svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
+svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
+svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
+svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
+svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
+svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
+svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
+svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
+svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
+svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
+svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
+svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
+svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
+svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
+svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
+svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
+svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
+svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
+svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
+svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
+svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
+svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
+svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
+svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
+svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
+svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
+svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
+svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
+svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
+svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
+svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
+svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
+svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
+svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
+svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
+svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
+svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
+svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
+svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
+svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
+svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
+svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
+svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
+svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
+svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
+svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
+svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
+svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
+svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
+svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
+svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
+svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
+svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
+svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
+svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
+svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
+svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
+svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
+svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
+svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
+svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
+svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
+svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
+svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
+svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
+svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
+svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
+svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
+svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
+svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
+svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
+svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
+svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
+svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
+svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
+svint16_t svqxtnb_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
+svint32_t svqxtnb_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
+svint8_t svqxtnb_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
+svuint16_t svqxtnb_u32(svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
+svuint32_t svqxtnb_u64(svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
+svuint8_t svqxtnb_u16(svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
+svint16_t svqxtnt_s32(svint16_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
+svint32_t svqxtnt_s64(svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
+svint8_t svqxtnt_s16(svint8_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
+svuint16_t svqxtnt_u32(svuint16_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
+svuint32_t svqxtnt_u64(svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
+svuint8_t svqxtnt_u16(svuint8_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
+svuint16_t svqxtunb_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
+svuint32_t svqxtunb_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
+svuint8_t svqxtunb_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
+svuint16_t svqxtunt_s32(svuint16_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
+svuint32_t svqxtunt_s64(svuint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
+svuint8_t svqxtunt_s16(svuint8_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
+svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
+svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
+svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
+svint16_t svraddhnb_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
+svint32_t svraddhnb_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
+svint8_t svraddhnb_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
+svuint16_t svraddhnb_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
+svuint32_t svraddhnb_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
+svuint8_t svraddhnb_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
+svint16_t svraddhnb_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
+svint32_t svraddhnb_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
+svint8_t svraddhnb_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
+svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
+svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
+svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
+svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
+svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
+svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
+svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
+svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
+svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
+svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
+svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
+svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
+svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
+svuint32_t svrecpe_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
+svuint32_t svrecpe_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
+svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
+svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
+svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
+svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
+svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
+svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
+svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
+svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
+svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
+svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
+svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
+svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
+svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
+svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
+svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
+svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
+svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
+svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
+svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
+svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
+svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
+svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
+svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
+svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
+svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
+svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
+svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
+svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
+svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
+svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
+svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
+svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
+svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
+svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
+svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
+svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
+svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
+svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
+svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
+svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
+svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
+svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
+svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
+svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
+svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
+svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
+svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
+svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
+svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
+svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
+svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
+svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
+svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
+svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
+svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
+svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
+svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
+svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
+svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
+svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
+svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
+svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
+svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
+svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
+svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
+svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
+svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
+svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
+svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
+svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
+svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
+svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
+svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
+svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
+svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
+svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
+svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
+svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
+svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
+svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
+svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
+svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
+svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
+svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
+svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
+svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
+svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
+svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
+svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
+svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
+svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
+svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
+svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
+svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
+svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
+svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
+svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
+svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
+svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
+svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
+svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
+svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
+svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
+svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
+svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
+svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
+svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
+svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
+svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
+svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
+svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
+svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
+svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
+svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
+svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
+svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
+svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
+svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
+svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
+svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
+svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
+svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
+svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
+svint16_t svrshrnb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
+svint32_t svrshrnb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
+svint8_t svrshrnb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
+svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
+svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
+svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
+svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
+svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
+svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
+svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
+svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
+svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
+svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
+svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
+svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
+svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
+svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
+svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
+svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
+svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
+svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
+svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
+svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
+svint16_t svrsubhnb_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
+svint32_t svrsubhnb_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
+svint8_t svrsubhnb_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
+svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
+svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
+svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
+svint16_t svrsubhnb_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
+svint32_t svrsubhnb_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
+svint8_t svrsubhnb_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
+svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
+svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
+svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
+svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
+svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
+svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
+svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
+svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
+svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
+svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
+svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
+svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
+svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
+svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
+svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
+svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
+svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
+svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
+svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
+svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
+svint32_t svshllb_n_s32(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
+svint64_t svshllb_n_s64(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
+svint16_t svshllb_n_s16(svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
+svuint32_t svshllb_n_u32(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
+svuint64_t svshllb_n_u64(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
+svuint16_t svshllb_n_u16(svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
+svint32_t svshllt_n_s32(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
+svint64_t svshllt_n_s64(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
+svint16_t svshllt_n_s16(svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
+svuint32_t svshllt_n_u32(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
+svuint64_t svshllt_n_u64(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
+svuint16_t svshllt_n_u16(svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
+svuint16_t svshrnb_n_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
+svuint32_t svshrnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
+svuint8_t svshrnb_n_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
+svint16_t svshrnb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
+svint32_t svshrnb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
+svint8_t svshrnb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
+svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
+svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
+svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
+svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
+svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
+svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
+svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
+svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
+svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
+svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
+svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
+svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
+svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
+svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
+svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
+svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
+svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
+svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
+svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
+svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
+svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
+svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
+svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
+svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
+svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
+svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
+svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
+svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
+svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
+svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
+svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
+svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
+svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
+svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
+svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
+svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
+svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
+svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
+svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
+svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
+svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
+svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
+svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
+svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
+svuint64_t svsra_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
+svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
+svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
+svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
+svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
+svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
+svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
+svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
+svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
+svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
+svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
+svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
+svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
+svint16_t svsubhnb_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
+svint32_t svsubhnb_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
+svint8_t svsubhnb_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
+svuint16_t svsubhnb_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
+svuint32_t svsubhnb_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
+svuint8_t svsubhnb_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
+svint16_t svsubhnb_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
+svint32_t svsubhnb_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
+svint8_t svsubhnb_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
+svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
+svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
+svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
+svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
+svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
+svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
+svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
+svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
+svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
+svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
+svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
+svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
+svint32_t svsublb_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
+svint64_t svsublb_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
+svint16_t svsublb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
+svuint32_t svsublb_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
+svuint64_t svsublb_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
+svuint16_t svsublb_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
+svint32_t svsublb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
+svint64_t svsublb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
+svint16_t svsublb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
+svuint32_t svsublb_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
+svuint64_t svsublb_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
+svuint16_t svsublb_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
+svint32_t svsublbt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
+svint64_t svsublbt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
+svint16_t svsublbt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
+svint32_t svsublbt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
+svint64_t svsublbt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
+svint16_t svsublbt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
+svint32_t svsublt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
+svint64_t svsublt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
+svint16_t svsublt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
+svuint32_t svsublt_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
+svuint64_t svsublt_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
+svuint16_t svsublt_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
+svint32_t svsublt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
+svint64_t svsublt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
+svint16_t svsublt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
+svuint32_t svsublt_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
+svuint64_t svsublt_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
+svuint16_t svsublt_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
+svint32_t svsubltb_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
+svint64_t svsubltb_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
+svint16_t svsubltb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
+svint32_t svsubltb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
+svint64_t svsubltb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
+svint16_t svsubltb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
+svint32_t svsubwb_n_s32(svint32_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
+svint64_t svsubwb_n_s64(svint64_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
+svint16_t svsubwb_n_s16(svint16_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
+svuint32_t svsubwb_n_u32(svuint32_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
+svuint64_t svsubwb_n_u64(svuint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
+svuint16_t svsubwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) +svint32_t svsubwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) +svint64_t svsubwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) +svint16_t svsubwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) +svuint32_t svsubwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) +svuint64_t svsubwb_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) +svuint16_t svsubwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) +svint32_t svsubwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) +svint64_t svsubwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) +svint16_t svsubwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) +svuint32_t svsubwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) +svuint64_t svsubwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) +svuint16_t svsubwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) +svint32_t svsubwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) +svint64_t svsubwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) +svint16_t svsubwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) +svuint32_t svsubwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) +svuint64_t svsubwt_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) +svuint16_t svsubwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) +svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) +svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) +svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) +svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) +svint8_t svtbl2_s8(svint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) +svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) +svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) +svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) +svint32_t svtbl2_s32(svint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) +svint64_t svtbl2_s64(svint64x2_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) +svint16_t svtbl2_s16(svint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) +svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) +svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) +svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) +svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) +svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) +svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) +svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) +svfloat16_t svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) +svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) +svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) +svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) +svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) +svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) +svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) +svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) +svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) +svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) +svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) +svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) +svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) +svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) +svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) +svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) +svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) +svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) +svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) +svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) +svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) +svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) +svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) +svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) +svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) +svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) +svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) +svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) +svbool_t svwhilege_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) +svbool_t svwhilege_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) +svbool_t svwhilege_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) +svbool_t svwhilege_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) +svbool_t svwhilege_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) +svbool_t svwhilege_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) +svbool_t svwhilege_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) +svbool_t svwhilege_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) +svbool_t svwhilege_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) +svbool_t svwhilege_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) +svbool_t svwhilege_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) +svbool_t svwhilege_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) +svbool_t svwhilege_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) +svbool_t svwhilege_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) +svbool_t svwhilege_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) +svbool_t svwhilege_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) +svbool_t svwhilegt_b8_s32(int32_t, int32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) +svbool_t svwhilegt_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) +svbool_t svwhilegt_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) +svbool_t svwhilegt_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) +svbool_t svwhilegt_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) +svbool_t svwhilegt_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) +svbool_t svwhilegt_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) +svbool_t svwhilegt_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) +svbool_t svwhilegt_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) +svbool_t svwhilegt_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) +svbool_t svwhilegt_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) +svbool_t svwhilegt_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) +svbool_t svwhilegt_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) +svbool_t svwhilegt_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) +svbool_t svwhilegt_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) +svbool_t svwhilegt_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) +svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) +svbool_t svwhilerw_s8(int8_t const *, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) +svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) +svbool_t svwhilerw_f64(float64_t const *, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) +svbool_t svwhilerw_s64(int64_t const *, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) +svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) +svbool_t svwhilerw_f16(float16_t const *, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) +svbool_t svwhilerw_s16(int16_t const *, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) +svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) +svbool_t svwhilerw_f32(float32_t const *, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) +svbool_t svwhilerw_s32(int32_t const *, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) +svbool_t 
svwhilewr_u8(uint8_t const *, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) +svbool_t svwhilewr_s8(int8_t const *, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) +svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) +svbool_t svwhilewr_f64(float64_t const *, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) +svbool_t svwhilewr_s64(int64_t const *, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) +svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) +svbool_t svwhilewr_f16(float16_t const *, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) +svbool_t svwhilewr_s16(int16_t const *, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) +svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) +svbool_t svwhilewr_f32(float32_t const *, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) +svbool_t svwhilewr_s32(int32_t const *, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) +svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) +svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) +svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) +svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) +svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) +svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) +svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) +svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) +svint8_t svaba(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) +svint32_t svaba(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) +svint64_t svaba(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) +svint16_t svaba(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) +svuint8_t svaba(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) +svuint32_t svaba(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) +svuint64_t svaba(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) +svuint16_t svaba(svuint16_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) +svint8_t svaba(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) +svint32_t svaba(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) +svint64_t svaba(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) +svint16_t svaba(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) +svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) +svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) +svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) +svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) +svint32_t svabalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) +svint64_t svabalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) +svint16_t svabalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) +svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) +svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) +svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) +svint32_t svabalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) +svint64_t svabalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) +svint16_t svabalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) +svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) +svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) +svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) +svint32_t svabalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) +svint64_t svabalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) +svint16_t svabalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) +svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) +svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) +svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) +svint32_t svabalt(svint32_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) +svint64_t svabalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) +svint16_t svabalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) +svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) +svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) +svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) +svint32_t svabdlb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) +svint64_t svabdlb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) +svint16_t svabdlb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) +svuint32_t svabdlb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) +svuint64_t svabdlb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) +svuint16_t svabdlb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) +svint32_t svabdlb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) +svint64_t svabdlb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) +svint16_t svabdlb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) +svuint32_t svabdlb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) +svuint64_t svabdlb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) +svuint16_t svabdlb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) +svint32_t svabdlt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) +svint64_t svabdlt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) +svint16_t svabdlt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) +svuint32_t svabdlt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) +svuint64_t svabdlt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) +svuint16_t svabdlt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) +svint32_t svabdlt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) +svint64_t svabdlt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) +svint16_t svabdlt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) +svuint32_t svabdlt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) +svuint64_t svabdlt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) +svuint16_t svabdlt(svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) +svint32_t svadalp_m(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) +svint64_t svadalp_m(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) +svint16_t svadalp_m(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) +svint32_t svadalp_x(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) +svint64_t svadalp_x(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) +svint16_t svadalp_x(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) +svint32_t svadalp_z(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) +svint64_t svadalp_z(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) +svint16_t svadalp_z(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) +svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) +svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) +svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) +svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) +svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) +svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) +svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) +svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) +svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) +svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) +svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) +svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) +svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) +svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) +svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) +svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) +svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) +svuint16_t svaddhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) +svuint32_t svaddhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) +svuint8_t svaddhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) +svint16_t svaddhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) +svint32_t svaddhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) +svint8_t svaddhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) +svuint16_t svaddhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) +svuint32_t svaddhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) +svuint8_t svaddhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) +svint16_t svaddhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) +svint32_t svaddhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) +svint8_t svaddhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) +svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) +svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) +svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) +svint16_t svaddhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) +svint32_t svaddhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) +svint8_t svaddhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) +svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) +svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) +svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) +svint16_t svaddhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) +svint32_t svaddhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) +svint8_t svaddhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) +svint32_t svaddlb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) +svint64_t svaddlb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) +svint16_t svaddlb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) +svuint32_t svaddlb(svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) +svuint64_t svaddlb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) +svuint16_t svaddlb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) +svint32_t svaddlb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) +svint64_t svaddlb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) +svint16_t svaddlb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) +svuint32_t svaddlb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) +svuint64_t svaddlb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) +svuint16_t svaddlb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) +svint32_t svaddlbt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) +svint64_t svaddlbt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) +svint16_t svaddlbt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) +svint32_t svaddlbt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) +svint64_t svaddlbt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) +svint16_t svaddlbt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) +svint32_t svaddlt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) +svint64_t svaddlt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) +svint16_t svaddlt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) +svuint32_t svaddlt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) +svuint64_t svaddlt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) +svuint16_t svaddlt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) +svint32_t svaddlt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) +svint64_t svaddlt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) +svint16_t svaddlt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) +svuint32_t svaddlt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) +svuint64_t svaddlt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) +svuint16_t svaddlt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) +svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) +svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) +svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) +svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) +svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) +svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) +svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) +svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) +svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) +svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) +svint8_t svaddp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) +svint32_t svaddp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) +svint64_t svaddp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) +svint16_t svaddp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) +svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) +svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) +svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) +svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) +svint8_t svaddp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) +svint32_t svaddp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) +svuint64_t svaddwb(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) +svuint16_t svaddwb(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t 
svaddwb(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) +svint32_t svaddwt(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t svaddwt(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) +svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t svbcax(svint8_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) +svint8_t svbsl1n(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) +svint32_t svbsl1n(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) +svint64_t svbsl1n(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) +svint16_t svbsl1n(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) +svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) +svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) +svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) +svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) +svint8_t svbsl2n(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) +svint32_t svbsl2n(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) +svint64_t svbsl2n(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) +svint16_t svbsl2n(svint16_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
+svint8_t svbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
+svint32_t svbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
+svint64_t svbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
+svint16_t svbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
+svint8_t svbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
+svint32_t svbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
+svint64_t svbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
+svint16_t svbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
+svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
+svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
+svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
+svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
+svint8_t svcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
+svint32_t svcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
+svint64_t svcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
+svint16_t svcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
+svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
+svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
+svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
+svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
+svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
+svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
+svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
+svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
+svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
+svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
+svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
+svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
+svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
+svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
+svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
+svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
+svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
+svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
+svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
+svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
+svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
+svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
+svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
+svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
+svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
+svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
+svint8_t sveor3(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
+svint32_t sveor3(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
+svint64_t sveor3(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
+svint16_t sveor3(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
+svint8_t sveor3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
+svint32_t sveor3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
+svint64_t sveor3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
+svint16_t sveor3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
+svint8_t sveortb(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
+svint32_t sveortb(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
+svint64_t sveortb(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
+svint16_t sveortb(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
+svint8_t sveortb(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
+svint32_t sveortb(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
+svint64_t sveortb(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
+svint16_t sveortb(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
+svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
+svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
+svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
+svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
+svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
+svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
+svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
+svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
+svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
+svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
+svint8_t svhsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
+svint32_t svhsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
+svint64_t svhsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
+svint16_t svhsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
+svint8_t svhsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
+svint32_t svhsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
+svint64_t svhsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
+svint16_t svhsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
+svint8_t svhsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
+svint32_t svhsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
+svint64_t svhsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
+svint16_t svhsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
+svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
+svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
+svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
+svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
+svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
+svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
+svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
+svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
+svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
+svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
+svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
+svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
+svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
+svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
+svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
+svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
+svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
+svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
+svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
+svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
+svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
+svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
+svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
+svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
+svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
+svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
+svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
+svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
+svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
+svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
+svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
+svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
+svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
+svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
+svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
+svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
+svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
+svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
+svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
+svint64_t svlogb_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
+svint32_t svlogb_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
+svint16_t svlogb_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
+svint64_t svlogb_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
+svint32_t svlogb_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
+svint16_t svlogb_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
+svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
+svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
+svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
+svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
+svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
+svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
+svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
+svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
+svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
+svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
+svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
+svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
+svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
+svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
+svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
+svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
+svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
+svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
+svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
+svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
+svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
+svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
+svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
+svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
+svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
+svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
+svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
+svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
+svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
+svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
+svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
+svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
+svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
+svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
+svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
+svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
+svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
+svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
+svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
+svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
+svint8_t svminp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
+svint32_t svminp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
+svint64_t svminp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
+svint16_t svminp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
+svint8_t svminp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
+svint32_t svminp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
+svint64_t svminp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
+svint16_t svminp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
+svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
+svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
+svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
+svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
+svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
+svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
+svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
+svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
+svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
+svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
+svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
+svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
+svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
+svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
+svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
+svint32_t svmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
+svint64_t svmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
+svint16_t svmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
+svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
+svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
+svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
+svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
+svint32_t svmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
+svint64_t svmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
+svint16_t svmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
+svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
+svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
+svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
+svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
+svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
+svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
+svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
+svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
+svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
+svint32_t svmlalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
+svint64_t svmlalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
+svint16_t svmlalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
+svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
+svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
+svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
+svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
+svint32_t svmlalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
+svint64_t svmlalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
+svint16_t svmlalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
+svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
+svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
+svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
+svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
+svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
+svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
+svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
+svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
+svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
+svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
+svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
+svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
+svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
+svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
+svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
+svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
+svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
+svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
+svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
+svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
+svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
+svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
+svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
+svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
+svint32_t svmovlb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
+svint64_t svmovlb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
+svint16_t svmovlb(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
+svuint32_t svmovlb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
+svuint64_t svmovlb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
+svuint16_t svmovlb(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
+svint32_t svmovlt(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
+svint64_t svmovlt(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
+svint16_t svmovlt(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
+svuint32_t svmovlt(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
+svuint64_t svmovlt(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
+svuint16_t svmovlt(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
+svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
+svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
+svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
+svint32_t svmul_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
+svint64_t svmul_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
+svint16_t svmul_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
+svint32_t svmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
+svint64_t svmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
+svint16_t svmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
+svuint32_t svmullb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
+svuint64_t svmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
+svuint16_t svmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
+svint32_t svmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
+svint64_t svmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
+svint16_t svmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
+svuint32_t svmullb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
+svuint64_t svmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
+svuint16_t svmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
+svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
+svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
+svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
+svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
+svint32_t svmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
+svint64_t svmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
+svint16_t svmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
+svuint32_t svmullt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
+svuint64_t svmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
+svuint16_t svmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
+svint32_t svmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
+svint64_t svmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
+svint16_t svmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
+svuint32_t svmullt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
+svuint64_t svmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
+svuint16_t svmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
+svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
+svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
+svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
+svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
+svuint8_t svpmul(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
+svuint8_t svpmul(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
+svuint64_t svpmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
+svuint16_t svpmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
+svuint64_t svpmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
+svuint16_t svpmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
+svuint8_t svpmullb_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
+svuint32_t svpmullb_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
+svuint8_t svpmullb_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
+svuint32_t svpmullb_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
+svuint64_t svpmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
+svuint16_t svpmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
+svuint64_t svpmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
+svuint16_t svpmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
+svuint8_t svpmullt_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
+svuint32_t svpmullt_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
+svuint8_t svpmullt_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
+svuint32_t svpmullt_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
+svint8_t svqabs_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
+svint32_t svqabs_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
+svint64_t svqabs_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
+svint16_t svqabs_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
+svint8_t svqabs_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
+svint32_t svqabs_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
+svint64_t svqabs_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
+svint16_t svqabs_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
+svint8_t svqabs_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
+svint32_t svqabs_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
+svint64_t svqabs_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
+svint16_t svqabs_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
+svint8_t svqcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
+svint32_t svqcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
+svint64_t svqcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
+svint16_t svqcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
+svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
+svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t);
+__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) +svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) +svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) +svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) +svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) +svint32_t svqdmlalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) +svint64_t svqdmlalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) +svint16_t svqdmlalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) +svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) +svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) +svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) +svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) +svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) +svint32_t svqdmlslb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) +svint64_t svqdmlslb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) +svint16_t svqdmlslb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) +svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) +svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) +svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) +svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) +svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) +svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) +svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) +svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) +svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) +svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) +svint16_t svqdmlslbt(svint16_t, svint8_t, 
svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) +svint32_t svqdmlslt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) +svint64_t svqdmlslt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) +svint16_t svqdmlslt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) +svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) +svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) +svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) +svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) +svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) +svint8_t svqdmulh(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) +svint32_t svqdmulh(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) +svint64_t svqdmulh(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) +svint16_t svqdmulh(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) +svint8_t svqdmulh(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) +svint32_t svqdmulh(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) +svint64_t svqdmulh(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) +svint16_t svqdmulh(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) +svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) +svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) +svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) +svint32_t svqdmullb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) +svint64_t svqdmullb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) +svint16_t svqdmullb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) +svint32_t svqdmullb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) +svint64_t svqdmullb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) +svint16_t svqdmullb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) +svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) +svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) +svint32_t svqdmullt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) +svint64_t svqdmullt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) +svint16_t svqdmullt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) +svint32_t svqdmullt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) +svint64_t svqdmullt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) +svint16_t svqdmullt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) +svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) +svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) +svint8_t svqneg_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) +svint32_t svqneg_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) +svint64_t svqneg_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) +svint16_t svqneg_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) +svint8_t svqneg_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) +svint32_t svqneg_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) +svint64_t svqneg_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) +svint16_t svqneg_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) +svint8_t svqneg_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) +svint32_t svqneg_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) +svint64_t svqneg_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) +svint16_t svqneg_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) +svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) +svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) +svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) +svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) +svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) +svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) +svint8_t svqrdmlah(svint8_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) +svint32_t svqrdmlah(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) +svint64_t svqrdmlah(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) +svint16_t svqrdmlah(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) +svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) +svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) +svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) +svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) +svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) +svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) +svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) +svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) +svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) +svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) +svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) +svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) +svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) +svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) +svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) +svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) +svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) +svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) +svint8_t svqrdmulh(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) +svint32_t svqrdmulh(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) +svint64_t svqrdmulh(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) +svint16_t svqrdmulh(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) +svint8_t svqrdmulh(svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) +svint32_t svqrdmulh(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) +svint64_t svqrdmulh(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) +svint16_t svqrdmulh(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) +svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) +svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) +svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) +svint8_t svqrshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) +svint32_t svqrshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) +svint64_t svqrshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) +svint16_t svqrshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) +svint8_t svqrshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) +svint32_t svqrshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) +svint64_t svqrshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) +svint16_t svqrshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) +svint8_t svqrshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) +svint32_t svqrshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) +svint64_t svqrshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) +svint16_t svqrshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) +svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) +svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) +svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) +svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) +svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) +svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) +svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) +svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) +svuint8_t 
svqrshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) +svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) +svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) +svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) +svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) +svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) +svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) +svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) +svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) +svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) +svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) +svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) +svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) +svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) +svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) +svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) +svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) +svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) +svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) +svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) +svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) +svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) +svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) +svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) +svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) +svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) +svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) +svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) +svint16_t svqrshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) +svint32_t svqrshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) +svint8_t svqrshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) +svuint16_t svqrshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) +svuint32_t svqrshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) +svuint8_t svqrshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) +svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) +svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) +svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) +svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) +svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) +svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) +svuint16_t svqrshrunb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) +svuint32_t svqrshrunb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) +svuint8_t svqrshrunb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) +svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) +svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) +svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) +svint8_t svqshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) +svint32_t svqshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) +svint64_t svqshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) +svint16_t svqshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) +svint8_t svqshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) +svint32_t svqshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) +svint64_t svqshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) +svint16_t svqshl_x(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) +svint8_t svqshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) +svint32_t svqshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) +svint64_t svqshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) +svint16_t svqshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) +svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) +svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) +svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) +svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) +svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) +svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) +svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) +svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) +svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) +svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) +svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) +svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) +svint8_t svqshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) +svint32_t svqshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) +svint64_t svqshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) +svint16_t svqshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) +svint8_t svqshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) +svint32_t svqshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) +svint64_t svqshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) +svint16_t svqshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) +svint8_t svqshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) +svint32_t svqshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) +svint64_t svqshl_z(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) +svint16_t svqshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) +svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) +svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) +svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) +svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) +svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) +svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) +svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) +svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) +svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) +svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) +svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) +svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) +svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) +svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) +svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) +svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) +svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) +svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) +svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) +svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) +svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) +svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) +svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) +svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) +svint16_t svqshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) +svint32_t svqshrnb(svint64_t, 
uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) +svint8_t svqshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) +svuint16_t svqshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) +svuint32_t svqshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) +svuint8_t svqshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) +svint16_t svqshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) +svint32_t svqshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) +svint8_t svqshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) +svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) +svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) +svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) +svuint16_t svqshrunb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) +svuint32_t svqshrunb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) +svuint8_t svqshrunb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) +svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) +svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) +svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) +svint8_t svqsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) +svint32_t svqsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) +svint64_t svqsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) +svint16_t svqsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) +svint8_t svqsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) +svint32_t svqsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) +svint64_t svqsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) +svint16_t svqsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) +svint8_t svqsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) +svint32_t svqsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) +svint64_t svqsub_z(svbool_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) +svint16_t svqsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) +svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) +svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) +svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) +svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) +svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) +svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) +svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) +svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) +svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) +svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) +svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) +svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) +svint8_t svqsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) +svint32_t svqsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) +svint64_t svqsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) +svint16_t svqsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) +svint8_t svqsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) +svint32_t svqsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) +svint64_t svqsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) +svint16_t svqsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) +svint8_t svqsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) +svint32_t svqsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) +svint64_t svqsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) +svint16_t svqsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) +svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) +svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) +svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) +svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) +svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) +svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) +svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) +svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) +svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) +svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) +svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) +svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) +svint8_t svqsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) +svint32_t svqsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) +svint64_t svqsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) +svint16_t svqsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) +svint8_t svqsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) +svint32_t svqsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) +svint64_t svqsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) +svint16_t svqsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) +svint8_t svqsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) +svint32_t svqsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) +svint64_t svqsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) +svint16_t svqsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) +svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) +svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) +svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) +svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) +svuint8_t 
svqsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) +svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) +svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) +svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) +svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) +svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) +svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) +svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) +svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) +svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) +svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) +svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) +svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) +svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) +svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) +svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) +svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) +svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) +svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) +svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) +svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) +svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) +svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) +svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) +svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) +svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) +svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
+svint16_t svqxtnb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
+svint32_t svqxtnb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
+svint8_t svqxtnb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
+svuint16_t svqxtnb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
+svuint32_t svqxtnb(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
+svuint8_t svqxtnb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
+svint16_t svqxtnt(svint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
+svint32_t svqxtnt(svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
+svint8_t svqxtnt(svint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
+svuint16_t svqxtnt(svuint16_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
+svuint32_t svqxtnt(svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
+svuint8_t svqxtnt(svuint8_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
+svuint16_t svqxtunb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
+svuint32_t svqxtunb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
+svuint8_t svqxtunb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
+svuint16_t svqxtunt(svuint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
+svuint32_t svqxtunt(svuint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
+svuint8_t svqxtunt(svuint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
+svuint16_t svraddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
+svuint32_t svraddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
+svuint8_t svraddhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
+svint16_t svraddhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
+svint32_t svraddhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
+svint8_t svraddhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
+svuint16_t svraddhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
+svuint32_t svraddhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
+svuint8_t svraddhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
+svint16_t svraddhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
+svint32_t svraddhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
+svint8_t svraddhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
+svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
+svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
+svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
+svint16_t svraddhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
+svint32_t svraddhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
+svint8_t svraddhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
+svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
+svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
+svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
+svint16_t svraddhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
+svint32_t svraddhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
+svint8_t svraddhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
+svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
+svuint32_t svrecpe_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
+svuint32_t svrecpe_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
+svint8_t svrhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
+svint32_t svrhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
+svint64_t svrhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
+svint16_t svrhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
+svint8_t svrhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
+svint32_t svrhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
+svint64_t svrhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
+svint16_t svrhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
+svint8_t svrhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
+svint32_t svrhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
+svint64_t svrhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
+svint16_t svrhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
+svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
+svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
+svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
+svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
+svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
+svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
+svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
+svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
+svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
+svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
+svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
+svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
+svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
+svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
+svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
+svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
+svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
+svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
+svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
+svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
+svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
+svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
+svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
+svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
+svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
+svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
+svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
+svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
+svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
+svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
+svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
+svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
+svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
+svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
+svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
+svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
+svint8_t svrshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
+svint32_t svrshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
+svint64_t svrshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
+svint16_t svrshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
+svint8_t svrshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
+svint32_t svrshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
+svint64_t svrshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
+svint16_t svrshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
+svint8_t svrshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
+svint32_t svrshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
+svint64_t svrshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
+svint16_t svrshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
+svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
+svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
+svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
+svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
+svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
+svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
+svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
+svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
+svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
+svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
+svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
+svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
+svint8_t svrshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
+svint32_t svrshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
+svint64_t svrshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
+svint16_t svrshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
+svint8_t svrshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
+svint32_t svrshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
+svint64_t svrshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
+svint16_t svrshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
+svint8_t svrshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
+svint32_t svrshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
+svint64_t svrshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
+svint16_t svrshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
+svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
+svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
+svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
+svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
+svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
+svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
+svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
+svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
+svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
+svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
+svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
+svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
+svint8_t svrshr_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
+svint32_t svrshr_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
+svint64_t svrshr_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
+svint16_t svrshr_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
+svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
+svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
+svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
+svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
+svint8_t svrshr_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
+svint32_t svrshr_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
+svint64_t svrshr_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
+svint16_t svrshr_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
+svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
+svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
+svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
+svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
+svint8_t svrshr_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
+svint32_t svrshr_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
+svint64_t svrshr_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
+svint16_t svrshr_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
+svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
+svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
+svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
+svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
+svuint16_t svrshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
+svuint32_t svrshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
+svuint8_t svrshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
+svint16_t svrshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
+svint32_t svrshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
+svint8_t svrshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
+svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
+svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
+svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
+svint16_t svrshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
+svint32_t svrshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
+svint8_t svrshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
+svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
+svuint32_t svrsqrte_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
+svuint32_t svrsqrte_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
+svint8_t svrsra(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
+svint32_t svrsra(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
+svint64_t svrsra(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
+svint16_t svrsra(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
+svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
+svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
+svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
+svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
+svuint16_t svrsubhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
+svuint32_t svrsubhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
+svuint8_t svrsubhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
+svint16_t svrsubhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
+svint32_t svrsubhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
+svint8_t svrsubhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
+svuint16_t svrsubhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
+svuint32_t svrsubhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
+svuint8_t svrsubhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
+svint16_t svrsubhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
+svint32_t svrsubhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
+svint8_t svrsubhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
+svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
+svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
+svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
+svint16_t svrsubhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
+svint32_t svrsubhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
+svint8_t svrsubhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
+svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
+svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
+svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
+svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
+svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
+svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
+svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
+svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
+svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
+svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
+svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
+svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
+svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
+svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
+svint32_t svshllb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
+svint64_t svshllb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
+svint16_t svshllb(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
+svuint32_t svshllb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
+svuint64_t svshllb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
+svuint16_t svshllb(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
+svint32_t svshllt(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
+svint64_t svshllt(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
+svint16_t svshllt(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
+svuint32_t svshllt(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
+svuint64_t svshllt(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
+svuint16_t svshllt(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
+svuint16_t svshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
+svuint32_t svshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
+svuint8_t svshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
+svint16_t svshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
+svint32_t svshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
+svint8_t svshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
+svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
+svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
+svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
+svint16_t svshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
+svint32_t svshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
+svint8_t svshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
+svuint8_t svsli(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
+svuint32_t svsli(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
+svuint64_t svsli(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
+svuint16_t svsli(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
+svint8_t svsli(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
+svint32_t svsli(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
+svint64_t svsli(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
+svint16_t svsli(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
+svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
+svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
+svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
+svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
+svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
+svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
+svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
+svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
+svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
+svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
+svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
+svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
+svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
+svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
+svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
+svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
+svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
+svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
+svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
+svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
+svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
+svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
+svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
+svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
+svint8_t svsra(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
+svint32_t svsra(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
+svint64_t svsra(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
+svint16_t svsra(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
+svuint8_t svsra(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
+svuint32_t svsra(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
+svuint64_t svsra(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
+svuint16_t svsra(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
+svuint8_t svsri(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
+svuint32_t svsri(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
+svuint64_t svsri(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
+svuint16_t svsri(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
+svint8_t svsri(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
+svint32_t svsri(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
+svint64_t svsri(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
+svint16_t svsri(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
+svuint16_t svsubhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
+svuint32_t svsubhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
+svuint8_t svsubhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
+svint16_t svsubhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
+svint32_t svsubhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
+svint8_t svsubhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
+svuint16_t svsubhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
+svuint32_t svsubhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
+svuint8_t svsubhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
+svint16_t svsubhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
+svint32_t svsubhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
+svint8_t svsubhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
+svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
+svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
+svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
+svint16_t svsubhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
+svint32_t svsubhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
+svint8_t svsubhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
+svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
+svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
+svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
+svint16_t svsubhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
+svint32_t svsubhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
+svint8_t svsubhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
+svint32_t svsublb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
+svint64_t svsublb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
+svint16_t svsublb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
+svuint32_t svsublb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
+svuint64_t svsublb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
+svuint16_t svsublb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
+svint32_t svsublb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
+svint64_t svsublb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
+svint16_t svsublb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
+svuint32_t svsublb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
+svuint64_t svsublb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
+svuint16_t svsublb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
+svint32_t svsublbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
+svint64_t svsublbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
+svint16_t svsublbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
+svint32_t svsublbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
+svint64_t svsublbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
+svint16_t svsublbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
+svint32_t svsublt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
+svint64_t svsublt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
+svint16_t svsublt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
+svuint32_t svsublt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
+svuint64_t svsublt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
+svuint16_t svsublt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
+svint32_t svsublt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
+svint64_t svsublt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
+svint16_t svsublt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
+svuint32_t svsublt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
+svuint64_t svsublt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
+svuint16_t svsublt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
+svint32_t svsubltb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
+svint64_t svsubltb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
+svint16_t svsubltb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
+svint32_t svsubltb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
+svint64_t svsubltb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
+svint16_t svsubltb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
+svint32_t svsubwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
+svint64_t svsubwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
+svint16_t svsubwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
+svuint32_t svsubwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
+svuint64_t svsubwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
+svuint16_t svsubwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
+svint32_t svsubwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
+svint64_t svsubwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
+svint16_t svsubwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
+svuint32_t svsubwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
+svuint64_t svsubwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
+svuint16_t svsubwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
+svint32_t svsubwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
+svint64_t svsubwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
+svint16_t svsubwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
+svuint32_t svsubwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
+svuint64_t svsubwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
+svuint16_t svsubwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
+svint32_t svsubwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
+svint64_t svsubwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
+svint16_t svsubwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
+svuint32_t svsubwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
+svuint64_t svsubwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
+svuint16_t svsubwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
+svuint8_t svtbl2(svuint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
+svuint32_t svtbl2(svuint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
+svuint64_t svtbl2(svuint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
+svuint16_t svtbl2(svuint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
+svint8_t svtbl2(svint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
+svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
+svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
+svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
+svint32_t svtbl2(svint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
+svint64_t svtbl2(svint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
+svint16_t svtbl2(svint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
+svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
+svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
+svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
+svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
+svint8_t svtbx(svint8_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
+svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
+svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
+svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
+svint32_t svtbx(svint32_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
+svint64_t svtbx(svint64_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
+svint16_t svtbx(svint16_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
+svbool_t svwhilege_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
+svbool_t svwhilege_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
+svbool_t svwhilege_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
+svbool_t svwhilege_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
+svbool_t svwhilege_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
+svbool_t svwhilege_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
+svbool_t svwhilege_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
+svbool_t svwhilege_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
+svbool_t svwhilege_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
+svbool_t svwhilege_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
+svbool_t svwhilege_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
+svbool_t svwhilege_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
+svbool_t svwhilege_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
+svbool_t svwhilege_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
+svbool_t svwhilege_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
+svbool_t svwhilege_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
+svbool_t svwhilegt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
+svbool_t svwhilegt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
+svbool_t svwhilegt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
+svbool_t svwhilegt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
+svbool_t svwhilegt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
+svbool_t svwhilegt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
+svbool_t svwhilegt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
+svbool_t svwhilegt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
+svbool_t svwhilegt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
+svbool_t svwhilegt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
+svbool_t svwhilegt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
+svbool_t svwhilegt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
+svbool_t svwhilegt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
+svbool_t svwhilegt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
+svbool_t svwhilegt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
+svbool_t svwhilegt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
+svbool_t svwhilerw(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
+svbool_t svwhilerw(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
+svbool_t svwhilerw(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
+svbool_t svwhilerw(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
+svbool_t svwhilerw(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
+svbool_t svwhilerw(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
+svbool_t svwhilerw(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
+svbool_t svwhilerw(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
+svbool_t svwhilerw(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
+svbool_t svwhilerw(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
+svbool_t svwhilerw(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
+svbool_t svwhilewr(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
+svbool_t svwhilewr(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
+svbool_t svwhilewr(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
+svbool_t svwhilewr(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
+svbool_t svwhilewr(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
+svbool_t svwhilewr(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
+svbool_t svwhilewr(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
+svbool_t svwhilewr(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
+svbool_t svwhilewr(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
+svbool_t svwhilewr(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
+svbool_t svwhilewr(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
+svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
+svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
+svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
+svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
+svint8_t svxar(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
+svint32_t svxar(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
+svint64_t svxar(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
+svint16_t svxar(svint16_t, svint16_t, uint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))
 svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))
@@ -5158,38 +18452,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))
 float32_t svaddv_f32(svbool_t, svfloat32_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))
 float16_t svaddv_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
-svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
-svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
-svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
-svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
-svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
-svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
-svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
-svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
-svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
-svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
-svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
-svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
-svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
-svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
-svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
-svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))
 svbool_t svand_b_z(svbool_t, svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))
@@ -6258,18 +19520,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw)))
 uint64_t svcntw(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat)))
 uint64_t svcntw_pat(enum svpattern);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
-svuint32_t svcompact_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
-svuint64_t svcompact_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
-svfloat64_t svcompact_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
-svfloat32_t svcompact_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
-svint32_t svcompact_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
-svint64_t svcompact_s64(svbool_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
 svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
@@ -7016,12 +20266,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))
 int64_t sveorv_s64(svbool_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))
 int16_t sveorv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
-svfloat64_t svexpa_f64(svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
-svfloat32_t svexpa_f32(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
-svfloat16_t svexpa_f16(svuint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))
 svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))
@@ -7286,90 +20530,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))
 svint64_t svld1_s64(svbool_t, int64_t const *);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))
 svint16_t svld1_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
-svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
-svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
-svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
-svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
-svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
-svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
-svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
-svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
-svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
-svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
-svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
-svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
-svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
-svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
-svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
-svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
-svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
-svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
-svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
-svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
-svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
-svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
-svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
-svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
-svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
-svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
-svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
-svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
-svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
-svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
-svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
-svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
-svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
-svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
-svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
-svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
-__ai
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) -svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) -svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) -svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) -svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) -svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) -svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) svuint8_t svld1_vnum_u8(svbool_t, uint8_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) @@ -7414,38 +20574,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) svint64_t svld1rq_s64(svbool_t, int64_t const *); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) svint16_t svld1rq_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) -svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) -svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) -svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) -svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) -svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) -svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) -svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) -svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) -svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) -svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) -svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) -svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) -svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) -svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) -svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) -svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32))) svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64))) @@ -7470,62 +20598,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64))) svint64_t svld1sb_s64(svbool_t, int8_t const *); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16))) svint16_t svld1sb_s16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) -svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) -svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) -svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) -svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) -svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) -svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) -svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) -svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) -svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) -svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) -svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) -svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) -svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) -svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) -svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) -svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) -svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) -svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) -svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) -svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) -svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) -svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) -svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) -svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) -svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) -svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) -svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) -svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32))) svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64))) @@ -7542,34 +20614,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32))) svint32_t svld1sh_s32(svbool_t, int16_t const *); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64))) svint64_t svld1sh_s64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) -svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) -svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) -svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) -svint64_t 
svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) -svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) -svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) -svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) -svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) -svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) -svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) -svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) -svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) -svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) -svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64))) svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64))) @@ -7578,38 +20622,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64))) svuint64_t svld1sw_u64(svbool_t, int32_t const *); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64))) svint64_t svld1sw_s64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) -svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) -svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) -svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) -svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) -svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) -svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) -svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) -svint64_t svld1ub_gather_u64base_s64(svbool_t, 
svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) -svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) -svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) -svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) -svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) -svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) -svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) -svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) -svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32))) svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64))) @@ -7634,62 +20646,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64))) svint64_t svld1ub_s64(svbool_t, uint8_t const *); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16))) svint16_t svld1ub_s16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) -svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) -svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) -svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) -svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) -svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) -svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) -svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) -svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) -svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) -svuint64_t 
svld1uh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) -svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) -svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) -svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) -svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) -svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) -svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) -svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) -svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) -svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) -svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) -svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) -svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) -svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) -svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) -svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) -svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) -svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) -svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32))) svuint32_t svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64))) @@ -7706,34 +20662,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32))) svint32_t svld1uh_s32(svbool_t, uint16_t const *); __ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64))) svint64_t svld1uh_s64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) -svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) -svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) -svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) -svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) -svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) -svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) -svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) -svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) -svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) -svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) -svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) -svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) -svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) -svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64))) svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64))) @@ -7874,602 +20802,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) svint16x4_t svld4_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) -svuint8_t svldff1_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) -svuint32_t svldff1_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) -svuint64_t svldff1_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) -svuint16_t svldff1_u16(svbool_t, uint16_t const *); 
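The long runs of removals from here on are the explicit-name first-faulting (svldff1*) declarations. Note the hunk headers map old positions around line 7,900 to around line 20,800 in the regenerated LLVM 19 header: these intrinsics appear to be reorganized within a much larger generated file, not dropped from ACLE. For reference, a minimal sketch (not from the patch) of the FFR idiom these declarations support:

    #include <arm_sve.h>

    /* Copy up to n floats, stopping cleanly at the first faulting
       element (e.g. when scanning toward an unmapped page). */
    int64_t copy_until_fault(float *dst, const float *src, int64_t n) {
        int64_t i = 0;
        while (i < n) {
            svbool_t pg = svwhilelt_b32_s64(i, n);
            svsetffr();                             /* reset first-fault register */
            svfloat32_t v = svldff1_f32(pg, src + i);
            svbool_t loaded = svrdffr_z(pg);        /* lanes actually loaded */
            svst1_f32(loaded, dst + i, v);
            i += (int64_t)svcntp_b32(pg, loaded);
            if (!svptest_last(pg, loaded)) break;   /* faulted before the end */
        }
        return i;
    }
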
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) -svint8_t svldff1_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) -svfloat64_t svldff1_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) -svfloat32_t svldff1_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) -svfloat16_t svldff1_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) -svint32_t svldff1_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) -svint64_t svldff1_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) -svint16_t svldff1_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) -svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) -svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) -svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) -svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) -svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) -svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) -svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) -svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) -svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) -svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) -svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) -svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) -svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) -svuint64_t svldff1_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) -svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) 
-svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) -svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) -svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) -svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) -svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) -svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) -svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) -svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) -svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) -svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) -svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) -svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) -svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) -svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) -svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) -svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) -svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) -svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) -svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) -svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) -svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) -svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) -svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) -svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) -svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) -svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) -svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) -svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) -svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) -svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) -svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) -svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) -svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) -svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) -svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) -svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) -svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) -svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) -svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) -svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) -svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) -svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) -svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) -svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) -svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) -svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) -svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) -svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) -svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) -svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) -svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) -svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) -svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) -svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32))) -svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64))) -svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16))) -svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32))) -svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64))) -svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16))) -svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32))) -svuint32_t svldff1sb_u32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64))) -svuint64_t svldff1sb_u64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16))) -svuint16_t svldff1sb_u16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32))) -svint32_t svldff1sb_s32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64))) -svint64_t svldff1sb_s64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16))) -svint16_t svldff1sb_s16(svbool_t, int8_t const *); 
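The svldff1sb_* declarations removed above are the first-faulting variants of the sign-extending byte loads; the plain svld1sb_* forms survive as unchanged context earlier in this diff. A one-line sketch (not from the patch) of what the extending load does:

    #include <arm_sve.h>

    /* Load up to n int8 values and sign-extend each active lane to int16. */
    svint16_t widen_bytes(const int8_t *p, int64_t n) {
        svbool_t pg = svwhilelt_b16_s64(0, n);
        return svld1sb_s16(pg, p);
    }
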
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) -svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) -svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) -svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) -svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) -svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) -svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) -svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) -svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) -svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) -svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) -svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) -svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) -svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) -svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) -svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) -svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) -svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) -svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) -svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) -svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) 
-svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) -svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) -svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) -svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) -svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) -svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) -svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) -svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32))) -svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64))) -svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32))) -svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64))) -svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32))) -svuint32_t svldff1sh_u32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64))) -svuint64_t svldff1sh_u64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32))) -svint32_t svldff1sh_s32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64))) -svint64_t svldff1sh_s64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) -svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) -svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) -svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) -svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) -svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) -svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
-svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
-svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
-svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
-svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
-svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
-svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
-svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
-svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64)))
-svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64)))
-svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64)))
-svuint64_t svldff1sw_u64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64)))
-svint64_t svldff1sw_s64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
-svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
-svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
-svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
-svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
-svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
-svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
-svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
-svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
-svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
-svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
-svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
-svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
-svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
-svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
-svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
-svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32)))
-svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64)))
-svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16)))
-svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32)))
-svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64)))
-svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16)))
-svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32)))
-svuint32_t svldff1ub_u32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64)))
-svuint64_t svldff1ub_u64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16)))
-svuint16_t svldff1ub_u16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32)))
-svint32_t svldff1ub_s32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64)))
-svint64_t svldff1ub_s64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16)))
-svint16_t svldff1ub_s16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
-svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
-svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
-svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
-svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
-svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
-svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
-svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
-svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
-svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
-svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
-svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
-svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
-svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
-svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
-svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
-svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
-svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
-svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
-svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
-svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
-svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
-svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
-svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
-svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
-svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
-svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
-svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
-svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32)))
-svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64)))
-svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32)))
-svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64)))
-svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32)))
-svuint32_t svldff1uh_u32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64)))
-svuint64_t svldff1uh_u64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32)))
-svint32_t svldff1uh_s32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64)))
-svint64_t svldff1uh_s64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
-svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
-svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
-svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
-svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
-svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
-svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
-svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
-svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
-svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
-svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
-svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
-svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
-svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
-svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64)))
-svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64)))
-svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64)))
-svuint64_t svldff1uw_u64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64)))
-svint64_t svldff1uw_s64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
-svuint8_t svldnf1_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
-svuint32_t svldnf1_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
-svuint64_t svldnf1_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
-svuint16_t svldnf1_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
-svint8_t svldnf1_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
-svfloat64_t svldnf1_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
-svfloat32_t svldnf1_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
-svfloat16_t svldnf1_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
-svint32_t svldnf1_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))
-svint64_t svldnf1_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))
-svint16_t svldnf1_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))
-svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))
-svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))
-svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))
-svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))
-svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))
-svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))
-svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))
-svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))
-svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))
-svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))
-svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32)))
-svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64)))
-svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16)))
-svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32)))
-svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64)))
-svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16)))
-svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32)))
-svuint32_t svldnf1sb_u32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64)))
-svuint64_t svldnf1sb_u64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16)))
-svuint16_t svldnf1sb_u16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32)))
-svint32_t svldnf1sb_s32(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64)))
-svint64_t svldnf1sb_s64(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16)))
-svint16_t svldnf1sb_s16(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32)))
-svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64)))
-svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32)))
-svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64)))
-svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32)))
-svuint32_t svldnf1sh_u32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64)))
-svuint64_t svldnf1sh_u64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32)))
-svint32_t svldnf1sh_s32(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64)))
-svint64_t svldnf1sh_s64(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64)))
-svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64)))
-svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64)))
-svuint64_t svldnf1sw_u64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64)))
-svint64_t svldnf1sw_s64(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32)))
-svuint32_t svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64)))
-svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16)))
-svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32)))
-svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64)))
-svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16)))
-svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32)))
-svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64)))
-svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16)))
-svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32)))
-svint32_t svldnf1ub_s32(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64)))
-svint64_t svldnf1ub_s64(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16)))
-svint16_t svldnf1ub_s16(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32)))
-svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64)))
-svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32)))
-svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64)))
-svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32)))
-svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64)))
-svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32)))
-svint32_t svldnf1uh_s32(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64)))
-svint64_t svldnf1uh_s64(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64)))
-svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64)))
-svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64)))
-svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64)))
-svint64_t svldnf1uw_s64(svbool_t, uint32_t const *);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))
 svuint8_t svldnt1_u8(svbool_t, uint8_t const *);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))
@@ -10362,82 +22694,18 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16)))
 svbool_t svpnext_b16(svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb)))
 void svprfb(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
-void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))
-void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))
-void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))
-void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))
-void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))
-void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))
-void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))
-void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum)))
 void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd)))
 void svprfd(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))
-void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))
-void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))
-void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))
-void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))
-void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))
-void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))
-void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))
-void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum)))
 void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh)))
 void svprfh(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))
-void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))
-void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))
-void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))
-void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))
-void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))
-void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))
-void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))
-void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum)))
 void svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw)))
 void svprfw(svbool_t, void const *, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
-void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
-void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
-void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
-void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
-void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
-void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
-void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
-void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum)))
 void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any)))
@@ -10838,10 +23106,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))
 svint64_t svrbit_s64_z(svbool_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
 svint16_t svrbit_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr)))
-svbool_t svrdffr(void);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z)))
-svbool_t svrdffr_z(svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
 svfloat64_t svrecpe_f64(svfloat64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))
@@ -11238,8 +23502,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))
 svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
 svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr)))
-void svsetffr(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
 svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
@@ -11302,90 +23564,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
 void svst1_s64(svbool_t, int64_t *, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
 void svst1_s16(svbool_t, int16_t *, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
-void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
-void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
-void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
-void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
-void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
-void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
-void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
-void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
-void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
-void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
-void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
-void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
-void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
-void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
-void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
-void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
-void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
-void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
-void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
-void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
-void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
-void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
-void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
-void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
-void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
-void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
-void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
-void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
-void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
-void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
-void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
-void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
-void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
-void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
-void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
-void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
-void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
-void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
-void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
-void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
-void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
-void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
 void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
@@ -11420,38 +23598,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
 void svst1b_u64(svbool_t, uint8_t *, svuint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
 void svst1b_u16(svbool_t, uint8_t *, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
-void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
-void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
-void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
-void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
-void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
-void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
-void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
-void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
-void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
-void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
-void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
-void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
-void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
-void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
-void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
-void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
 void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
@@ -11472,62 +23618,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
 void svst1h_u32(svbool_t, uint16_t *, svuint32_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
 void svst1h_u64(svbool_t, uint16_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
-void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
-void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
-void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
-void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
-void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
-void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
-void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
-void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
-void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
-void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
-void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
-void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
-void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
-void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
-void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
-void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
-void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
-void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
-void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
-void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
-void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
-void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
-void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
-void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
-void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
-void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
-void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
-void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
 void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
@@ -11540,34 +23630,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
 void svst1w_s64(svbool_t, int32_t *, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
 void svst1w_u64(svbool_t, uint32_t *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
-void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
-void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
-void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
-void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
-void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
-void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
-void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
-void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
-void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
-void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
-void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
-void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
-void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
-void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
 void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
@@ -12034,12 +24096,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))
 svint64_t svtbl_s64(svint64_t, svuint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))
 svint16_t svtbl_s16(svint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
-svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
-svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
-svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))
 svuint8_t svtrn1_u8(svuint8_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))
@@ -12100,18 +24156,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64)))
 svbool_t svtrn2_b64(svbool_t, svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8)))
 svbool_t svtrn2_b8(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
-svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
-svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
-svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
-svfloat64_t svtssel_f64(svfloat64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
-svfloat32_t svtssel_f32(svfloat32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
-svfloat16_t svtssel_f16(svfloat16_t, svuint16_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8)))
 svuint8x2_t svundef2_u8(void);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32)))
@@ -12352,8 +24396,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))
 svbool_t svwhilelt_b64_s64(int64_t, int64_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))
 svbool_t svwhilelt_b16_s64(int64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr)))
-void svwrffr(svbool_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))
 svuint8_t svzip1_u8(svuint8_t, svuint8_t);
 __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))
@@ -12796,38 +24838,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))
 float32_t svaddv(svbool_t, svfloat32_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))
 float16_t svaddv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
-svuint32_t svadrb_offset(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
-svuint64_t svadrb_offset(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
-svuint32_t svadrb_offset(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
-svuint64_t svadrb_offset(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
-svuint32_t svadrd_index(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
-svuint64_t svadrd_index(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
-svuint32_t svadrd_index(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
-svuint64_t svadrd_index(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
-svuint32_t svadrh_index(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
-svuint64_t svadrh_index(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
-svuint32_t svadrh_index(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
-svuint64_t svadrh_index(svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
-svuint32_t svadrw_index(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
-svuint64_t svadrw_index(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
-svuint32_t svadrw_index(svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
-svuint64_t svadrw_index(svuint64_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))
 svbool_t svand_z(svbool_t, svbool_t, svbool_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))
@@ -13872,18 +25882,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
 svuint64_t svcnt_z(svbool_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
 svuint16_t svcnt_z(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
-svuint32_t svcompact(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
-svuint64_t svcompact(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
-svfloat64_t svcompact(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
-svfloat32_t svcompact(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
-svint32_t svcompact(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
-svint64_t svcompact(svbool_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
 svuint8x2_t svcreate2(svuint8_t, svuint8_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
@@ -14630,12 +26628,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))
 int64_t sveorv(svbool_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))
 int16_t sveorv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
-svfloat64_t svexpa(svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
-svfloat32_t svexpa(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
-svfloat16_t svexpa(svuint16_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))
 svuint8_t svext(svuint8_t, svuint8_t, uint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))
@@ -14884,90 +26876,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))
 svint64_t svld1(svbool_t, int64_t const *);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))
 svint16_t svld1(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
-svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
-svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
-svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
-svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
-svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
-svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
-svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
-svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
-svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
-svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
-svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
-svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
-svuint32_t svld1_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
-svuint64_t svld1_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
-svfloat64_t svld1_gather_f64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
-svfloat32_t svld1_gather_f32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
-svint32_t svld1_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
-svint64_t svld1_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
-svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
-svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
-svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
-svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
-svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
-svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
-svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
-svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
-svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
-svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
-svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
-svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
-svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
-svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
-svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
-svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
-svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
-svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))
-svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))
-svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))
-svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))
-svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))
-svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))
-svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))
 svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))
@@ -15012,238 +26920,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))
 svint64_t svld1rq(svbool_t, int64_t const *);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))
 svint16_t svld1rq(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))
-svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))
-svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))
-svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))
-svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))
-svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))
-svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))
-svint32_t svld1sb_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))
-svint64_t svld1sb_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))
-svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))
-svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))
-svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))
-svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))
-svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
-svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
-svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
-svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
-svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
-svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
-svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
-svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
-svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
-svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
-svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
-svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
-svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
-svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
-svint32_t svld1sh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
-svint64_t svld1sh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
-svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
-svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
-svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
-svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
-svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
-svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
-svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
-svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
-svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
-svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
-svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
-svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
-svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
-svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) -svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) -svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) -svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) -svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) -svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) -svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) -svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) -svint64_t svld1sw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) -svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) -svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) -svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) -svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) -svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) -svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) -svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) -svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) -svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) -svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) -svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) -svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) -svuint32_t svld1ub_gather_u32(svbool_t, 
svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) -svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) -svint32_t svld1ub_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) -svint64_t svld1ub_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) -svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) -svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) -svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) -svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) -svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) -svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) -svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) -svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) -svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) -svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) -svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) -svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) -svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) -svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) -svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) -svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) -svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) -svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) -svint32_t 
svld1uh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) -svint64_t svld1uh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) -svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) -svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) -svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) -svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) -svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) -svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) -svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) -svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) -svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) -svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) -svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) -svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) -svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) -svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) -svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) -svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) -svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) -svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) -svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) -svint64_t 
svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) -svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) -svint64_t svld1uw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) -svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) -svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) -svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) -svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) -svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) -svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) -svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) -svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) svuint8x2_t svld2(svbool_t, uint8_t const *); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) @@ -15376,410 +27052,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) -svuint8_t svldff1(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) -svuint32_t svldff1(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) -svuint64_t svldff1(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) -svuint16_t svldff1(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) -svint8_t svldff1(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) -svfloat64_t svldff1(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) -svfloat32_t svldff1(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) -svfloat16_t svldff1(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) -svint32_t svldff1(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) -svint64_t svldff1(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) 
-svint16_t svldff1(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) -svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) -svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) -svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) -svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) -svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) -svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) -svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) -svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) -svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) -svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) -svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) -svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) -svuint32_t svldff1_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) -svuint64_t svldff1_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) -svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) -svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) -svint32_t svldff1_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) -svint64_t svldff1_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) -svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) -svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) -svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) -svuint32_t 
svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) -svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) -svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) -svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) -svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) -svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) -svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) -svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) -svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) -svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) -svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) -svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) -svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) -svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) -svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) -svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) -svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) -svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) -svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) -svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) -svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) -svuint8_t svldff1_vnum(svbool_t, uint8_t const *, 
int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) -svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) -svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) -svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) -svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) -svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) -svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) -svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) -svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) -svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) -svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) -svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) -svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) -svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) -svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) -svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) -svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) -svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) -svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) -svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) -svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) -svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) -svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) -svuint64_t 
svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) -svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) -svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) -svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) -svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) -svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) -svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) -svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) -svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) -svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) -svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) -svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) -svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) -svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) -svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) -svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) -svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) -svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) -svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) -svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) -svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) -svint64_t 
svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) -svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) -svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) -svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) -svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) -svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) -svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) -svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) -svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) -svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) -svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) -svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) -svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) -svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) -svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) -svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) -svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) -svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) -svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) -svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) -svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) -svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) -svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) -svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) -svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) -svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) -svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) -svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) -svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) -svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) -svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) -svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) -svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) -svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) -svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) -svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) -svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) -svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) -svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) -svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) -svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) -svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) -svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) -svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) -svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) -svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) -svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) -svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) -svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) -svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) -svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) -svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) -svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) -svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) -svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) -svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) -svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) -svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) -svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) -svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) -svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) -svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) -svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) -svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) -svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) -svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) -svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) -svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) -svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) -svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) -svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) -svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) -svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) -svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) -svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) -svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) -svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) -svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) -svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) -svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) -svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) -svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) -svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) -svuint8_t svldnf1(svbool_t, uint8_t const 
*); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) -svuint32_t svldnf1(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) -svuint64_t svldnf1(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) -svuint16_t svldnf1(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) -svint8_t svldnf1(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) -svfloat64_t svldnf1(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) -svfloat32_t svldnf1(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) -svfloat16_t svldnf1(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) -svint32_t svldnf1(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) -svint64_t svldnf1(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) -svint16_t svldnf1(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) -svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) -svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) -svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) -svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) -svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) -svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) -svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) -svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) -svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) -svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) -svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) svuint8_t svldnt1(svbool_t, uint8_t const *); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) @@ -17662,70 +28934,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) svbool_t svpfalse(void); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) svbool_t svpfirst(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) -void svprfb_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) -void svprfb_gather(svbool_t, svuint64_t, 
enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) -void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) -void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) -void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) -void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) -void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) -void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) -void svprfd_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) -void svprfd_gather(svbool_t, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) -void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) -void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) -void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) -void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) -void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) -void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) -void svprfh_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) -void svprfh_gather(svbool_t, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) -void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) -void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) -void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) -void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) -void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) -void svprfh_gather_index(svbool_t, void const *, svuint64_t, 
enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
-void svprfw_gather(svbool_t, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
-void svprfw_gather(svbool_t, svuint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
-void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
-void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
-void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
-void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
-void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
-void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
 svint8_t svqadd(svint8_t, int8_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
@@ -18552,90 +29760,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
 void svst1(svbool_t, int64_t *, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
 void svst1(svbool_t, int16_t *, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
-void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
-void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
-void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
-void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
-void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
-void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
-void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
-void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
-void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
-void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
-void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
-void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
-void svst1_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
-void svst1_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
-void svst1_scatter(svbool_t, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
-void svst1_scatter(svbool_t, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
-void svst1_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
-void svst1_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
-void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
-void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
-void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
-void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
-void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
-void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
-void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
-void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
-void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
-void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
-void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
-void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
-void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
-void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
-void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
-void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
-void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
-void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
-void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
-void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
-void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
-void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
-void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
-void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
 void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
@@ -18670,38 +29794,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
 void svst1b(svbool_t, uint8_t *, svuint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
 void svst1b(svbool_t, uint8_t *, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
-void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
-void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
-void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
-void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
-void svst1b_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
-void svst1b_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
-void svst1b_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
-void svst1b_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
-void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
-void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
 void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
@@ -18722,62 +29814,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
 void svst1h(svbool_t, uint16_t *, svuint32_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
 void svst1h(svbool_t, uint16_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
-void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
-void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
-void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
-void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
-void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
-void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
-void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
-void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
-void svst1h_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
-void svst1h_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
-void svst1h_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
-void svst1h_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
-void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
-void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
-void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
-void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
-void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
-void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
-void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
 void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
@@ -18790,34 +29826,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
 void svst1w(svbool_t, int32_t *, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
 void svst1w(svbool_t, uint32_t *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
-void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
-void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
-void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
-void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
-void svst1w_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
-void svst1w_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
-void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
-void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
-void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
-void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
-void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
-void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
-void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
-void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
 void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
@@ -19284,12 +30292,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))
 svint64_t svtbl(svint64_t, svuint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))
 svint16_t svtbl(svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
-svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
-svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
-svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))
 svuint8_t svtrn1(svuint8_t, svuint8_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))
@@ -19334,18 +30336,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))
 svint64_t svtrn2(svint64_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))
 svint16_t svtrn2(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
-svfloat64_t svtsmul(svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
-svfloat32_t svtsmul(svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
-svfloat16_t svtsmul(svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
-svfloat64_t svtssel(svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
-svfloat32_t svtssel(svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
-svfloat16_t svtssel(svfloat16_t, svuint16_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
 svbool_t svunpkhi(svbool_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
@@ -19526,11444 +30516,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))
 svint64_t svzip2(svint64_t, svint64_t);
 __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))
 svint16_t svzip2(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
-svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
-svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
-svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
-svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
-svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
-svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
-svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
-svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
-svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
-svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
-bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
-svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
-bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
-svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
-svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
-svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
-svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
-svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
-svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
-svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
-svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
-svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
-svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
-svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
-svbfloat16_t svdup_n_bf16(bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
-svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
-svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
-svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
-svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
-svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
-svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
-svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
-svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
-svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
-svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
-svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
-bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
-bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
-svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
-svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
-svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
-svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
-svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
-svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
-svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
-svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
-svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
-svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
-svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
-svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
-svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
-svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
-svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
-uint64_t svlen_bf16(svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
-svbfloat16_t svrev_bf16(svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
-svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
-svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
-svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
-svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
-svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
-void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
-void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
-void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
-void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
-void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
-void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
-void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
-void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
-void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
-void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
-svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
-svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
-svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16)))
-svbfloat16x2_t svundef2_bf16(void);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16)))
-svbfloat16x3_t svundef3_bf16(void);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16)))
-svbfloat16x4_t svundef4_bf16(void);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16)))
-svbfloat16_t svundef_bf16(void);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
-svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
-svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
-svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
-svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
-svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
-svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
-svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
-svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
-svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
-svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
-svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
-svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
-svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
-svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
-bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
-svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
-bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
-svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
-svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
-svuint16_t svcnt_x(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
-svuint16_t svcnt_z(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
-svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
-svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
-svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
-svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
-svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
-svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
-svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
-svbfloat16_t svdup_bf16(bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
-svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
-svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
-svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
-svbfloat16_t svdup_lane(svbfloat16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
-svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
-svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
-svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
-svbfloat16_t svget2(svbfloat16x2_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
-svbfloat16_t svget3(svbfloat16x3_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
-svbfloat16_t svget4(svbfloat16x4_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
-svbfloat16_t svinsr(svbfloat16_t, bfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
-bfloat16_t svlasta(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
-bfloat16_t svlastb(svbool_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
-svbfloat16_t svld1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
-svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
-svbfloat16_t svld1rq(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
-svbfloat16x2_t svld2(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
-svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
-svbfloat16x3_t svld3(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
-svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
-svbfloat16x4_t svld4(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
-svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
-svbfloat16_t svldff1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
-svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
-svbfloat16_t svldnf1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
-svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
-svbfloat16_t svldnt1(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
-svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
-uint64_t svlen(svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
-svbfloat16_t svrev(svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
-svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
-svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
-svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
-svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
-svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
-void svst1(svbool_t, bfloat16_t *, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
-void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
-void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
-void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
-void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
-void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
-void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
-void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
-void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
-void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
-svbfloat16_t svtbl(svbfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
-svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
-svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
-svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
-svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
-svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
-svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
-svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
-svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
-svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
-svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
-svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
-svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
-svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
-svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
-svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
-svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
-svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
-svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
-svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
-svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
-svuint8_t svld1ro_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
-svuint32_t svld1ro_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
-svuint64_t svld1ro_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
-svuint16_t svld1ro_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
-svint8_t svld1ro_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
-svfloat64_t svld1ro_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
-svfloat32_t svld1ro_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
-svfloat16_t svld1ro_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
-svint32_t svld1ro_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
-svint64_t svld1ro_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
-svint16_t svld1ro_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
-svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
-svuint8_t svtrn1q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
-svuint32_t svtrn1q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
-svuint64_t svtrn1q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
-svuint16_t svtrn1q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
-svint8_t svtrn1q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
-svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
-svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
-svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
-svint32_t svtrn1q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
-svint64_t svtrn1q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
-svint16_t svtrn1q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
-svuint8_t svtrn2q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
-svuint32_t svtrn2q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
-svuint64_t svtrn2q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
-svuint16_t svtrn2q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
-svint8_t svtrn2q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
-svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
-svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
-svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
-svint32_t svtrn2q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
-svint64_t svtrn2q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
-svint16_t svtrn2q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
-svuint8_t svuzp1q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
-svuint32_t svuzp1q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
-svuint64_t svuzp1q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
-svuint16_t svuzp1q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
-svint8_t svuzp1q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
-svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
-svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
-svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
-svint32_t svuzp1q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
-svint64_t svuzp1q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
-svint16_t svuzp1q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
-svuint8_t svuzp2q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
-svuint32_t svuzp2q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
-svuint64_t svuzp2q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
-svuint16_t svuzp2q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
-svint8_t svuzp2q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
-svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
-svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
-svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
-svint32_t svuzp2q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
-svint64_t svuzp2q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
-svint16_t svuzp2q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
-svuint8_t svzip1q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
-svuint32_t svzip1q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
-svuint64_t svzip1q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
-svuint16_t svzip1q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
-svint8_t svzip1q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
-svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
-svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
-svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
-svint32_t svzip1q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
-svint64_t svzip1q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
-svint16_t svzip1q_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
-svuint8_t svzip2q_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
-svuint32_t svzip2q_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
-svuint64_t svzip2q_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
-svuint16_t svzip2q_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
-svint8_t svzip2q_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
-svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
-svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
-svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
-svint32_t svzip2q_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
-svint64_t svzip2q_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
-svint16_t svzip2q_s16(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
-svuint8_t svld1ro(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
-svuint32_t svld1ro(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
-svuint64_t svld1ro(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
-svuint16_t svld1ro(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
-svint8_t svld1ro(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
-svfloat64_t svld1ro(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
-svfloat32_t svld1ro(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
-svfloat16_t svld1ro(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
-svint32_t svld1ro(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
-svint64_t svld1ro(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
-svint16_t svld1ro(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
-svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
-svuint8_t svtrn1q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
-svuint32_t svtrn1q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
-svuint64_t svtrn1q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
-svuint16_t svtrn1q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
-svint8_t svtrn1q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
-svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
-svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
-svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
-svint32_t svtrn1q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
-svint64_t svtrn1q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
-svint16_t svtrn1q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
-svuint8_t svtrn2q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
-svuint32_t svtrn2q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
-svuint64_t svtrn2q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
-svuint16_t svtrn2q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
-svint8_t svtrn2q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
-svfloat64_t svtrn2q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
-svfloat32_t svtrn2q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
-svfloat16_t svtrn2q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
-svint32_t svtrn2q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
-svint64_t svtrn2q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
-svint16_t svtrn2q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
-svuint8_t svuzp1q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
-svuint32_t svuzp1q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
-svuint64_t svuzp1q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
-svuint16_t svuzp1q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
-svint8_t svuzp1q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
-svfloat64_t svuzp1q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
-svfloat32_t svuzp1q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
-svfloat16_t svuzp1q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
-svint32_t svuzp1q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
-svint64_t svuzp1q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
-svint16_t svuzp1q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
-svuint8_t svuzp2q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
-svuint32_t svuzp2q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
-svuint64_t svuzp2q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
-svuint16_t svuzp2q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
-svint8_t svuzp2q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
-svfloat64_t svuzp2q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
-svfloat32_t svuzp2q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
-svfloat16_t svuzp2q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
-svint32_t svuzp2q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
-svint64_t svuzp2q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
-svint16_t svuzp2q(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
-svuint8_t svzip1q(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
-svuint32_t svzip1q(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
-svuint64_t svzip1q(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
-svuint16_t svzip1q(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
-svint8_t svzip1q(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
-svfloat64_t svzip1q(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
-svfloat32_t svzip1q(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
-svfloat16_t svzip1q(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
-svint32_t svzip1q(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
-svint64_t svzip1q(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) -svint16_t svzip1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) -svuint8_t svzip2q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) -svuint32_t svzip2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) -svuint64_t svzip2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) -svuint16_t svzip2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) -svint8_t svzip2q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) -svfloat64_t svzip2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) -svfloat32_t svzip2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) -svfloat16_t svzip2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) -svint32_t svzip2q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) -svint64_t svzip2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) -svint16_t svzip2q(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) -svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) -svbfloat16_t svld1ro(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) -svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) -svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) -svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) -svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) -svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) -svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) -svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) -svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) -svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) -svint32_t svmmla(svint32_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) -svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) -svint32_t svsudot(svint32_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) -svint32_t svsudot(svint32_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) -svint32_t 
svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) -svint32_t svusdot(svint32_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) -svint32_t svusdot(svint32_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) -svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) -svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) -svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) -svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) -svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) -svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) -svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) -svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) -svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) -svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) -svint8_t svaba_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) -svint32_t svaba_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) -svint64_t svaba_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) -svint16_t svaba_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) -svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) -svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) -svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) -svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) -svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) -svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) -svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) -svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) -svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) -svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) -svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) -svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) -svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) -svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) -svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) -svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) -svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) -svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) -svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) -svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) -svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) -svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) -svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) -svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) -svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) -svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) -svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) -svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) -svint32_t svabdlb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) -svint64_t svabdlb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) -svint16_t svabdlb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) -svuint32_t svabdlb_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) -svuint64_t svabdlb_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) -svuint16_t svabdlb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) -svint32_t svabdlb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) -svint64_t svabdlb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) -svint16_t svabdlb_s16(svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) -svuint32_t svabdlb_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) -svuint64_t svabdlb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) -svuint16_t svabdlb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) -svint32_t svabdlt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) -svint64_t svabdlt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) -svint16_t svabdlt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) -svuint32_t svabdlt_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) -svuint64_t svabdlt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) -svuint16_t svabdlt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) -svint32_t svabdlt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) -svint64_t svabdlt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) -svint16_t svabdlt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) -svuint32_t svabdlt_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) -svuint64_t svabdlt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) -svuint16_t svabdlt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) -svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) -svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) -svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) -svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) -svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) -svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) -svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) -svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) -svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) -svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) -svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) -svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) -svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) -svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) -svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) -svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) -svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) -svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) -svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) -svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) -svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) -svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) -svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) -svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) -svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) -svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) -svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) -svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) -svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) -svint16_t svaddhnb_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) -svint32_t svaddhnb_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) -svint8_t svaddhnb_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) -svuint16_t svaddhnb_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) -svuint32_t svaddhnb_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) -svuint8_t svaddhnb_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) -svint16_t svaddhnb_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) -svint32_t svaddhnb_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) -svint8_t svaddhnb_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) -svuint16_t 
svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) -svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) -svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) -svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) -svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) -svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) -svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) -svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) -svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) -svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) -svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) -svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) -svint32_t svaddlb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) -svint64_t svaddlb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) -svint16_t svaddlb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) -svuint32_t svaddlb_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) -svuint64_t svaddlb_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) -svuint16_t svaddlb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) -svint32_t svaddlb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) -svint64_t svaddlb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) -svint16_t svaddlb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) -svuint32_t svaddlb_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) -svuint64_t svaddlb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) -svuint16_t svaddlb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) -svint32_t svaddlbt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) -svint64_t svaddlbt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) -svint16_t svaddlbt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) -svint32_t svaddlbt_s32(svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) -svint64_t svaddlbt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) -svint16_t svaddlbt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) -svint32_t svaddlt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) -svint64_t svaddlt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) -svint16_t svaddlt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) -svuint32_t svaddlt_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) -svuint64_t svaddlt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) -svuint16_t svaddlt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) -svint32_t svaddlt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) -svint64_t svaddlt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) -svint16_t svaddlt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) -svuint32_t svaddlt_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) -svuint64_t svaddlt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) -svuint16_t svaddlt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) -svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) -svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) -svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) -svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) -svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) -svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) -svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) -svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) -svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) -svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) -svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) -svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) -svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) -svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) -svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) -svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) -svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) -svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) -svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) -svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) -svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) -svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) -svint32_t svaddwb_n_s32(svint32_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) -svint64_t svaddwb_n_s64(svint64_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) -svint16_t svaddwb_n_s16(svint16_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) -svuint32_t svaddwb_n_u32(svuint32_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) -svuint64_t svaddwb_n_u64(svuint64_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) -svuint16_t svaddwb_n_u16(svuint16_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) -svint32_t svaddwb_s32(svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) -svint64_t svaddwb_s64(svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) -svint16_t svaddwb_s16(svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) -svuint32_t svaddwb_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) -svuint64_t svaddwb_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) -svuint16_t svaddwb_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) -svint32_t svaddwt_n_s32(svint32_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) -svint64_t svaddwt_n_s64(svint64_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) -svint16_t svaddwt_n_s16(svint16_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) -svuint32_t svaddwt_n_u32(svuint32_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) -svuint64_t svaddwt_n_u64(svuint64_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) -svuint16_t svaddwt_n_u16(svuint16_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) -svint32_t svaddwt_s32(svint32_t, 
svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) -svint64_t svaddwt_s64(svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) -svint16_t svaddwt_s16(svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) -svuint32_t svaddwt_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) -svuint64_t svaddwt_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) -svuint16_t svaddwt_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) -svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) -svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) -svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) -svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) -svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) -svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) -svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) -svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) -svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) -svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) -svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) -svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) -svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) -svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) -svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) -svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) -svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) -svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) -svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) -svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) -svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) -svint32_t svbsl1n_n_s32(svint32_t, svint32_t, 
int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) -svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) -svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) -svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) -svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) -svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) -svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) -svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) -svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) -svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) -svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) -svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) -svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) -svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) -svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) -svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) -svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) -svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) -svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) -svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) -svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) -svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) -svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) -svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) -svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) -svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) -svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) -svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) -svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) -svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) -svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) -svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) -svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) -svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) -svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) -svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) -svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) -svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) -svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) -svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) -svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) -svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) -svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) -svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) -svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) -svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) -svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) -svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) -svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) -svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) -svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) -svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) -svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) -svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, 
uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) -svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) -svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) -svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) -svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) -svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) -svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) -svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) -svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) -svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) -svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) -svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) -svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) -svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) -svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) -svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) -svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) -svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) -svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) -svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) -svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) -svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) -svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) -svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) -svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) -svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) -svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) -svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) -svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) -svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) -svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) -svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) -svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) -svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) -svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) -svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) -svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) -svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) -svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) -svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) -svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) -svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) -svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) -svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) -svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) -svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) -svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) -svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) -svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) -svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) -svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) 
-svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) -svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) -svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) -svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) -svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) -svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) -svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) -svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) -svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) -svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) -svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) -svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) -svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) -svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) -svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) -svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) -svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) -svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) -svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) -svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) -svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) -svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) -svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) -svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) -svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) -svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) -svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) -svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) -svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) -svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) -svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) -svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) -svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) -svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) -svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) -svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) -svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) -svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) -svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) -svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) -svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) -svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) -svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) -svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) -svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) -svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) -svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) -svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) -svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) -svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) -svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) -svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) -svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
-svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
-svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
-svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
-svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
-svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
-svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
-svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
-svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
-svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
-svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
-svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
-svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
-svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
-svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
-svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
-svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
-svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
-svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
-svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
-svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
-svuint8_t svhistseg_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
-svuint8_t svhistseg_s8(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
-svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
-svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
-svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
-svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
-svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
-svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
-svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
-svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
-svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
-svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
-svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
-svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
-svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
-svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
-svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
-svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
-svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
-svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
-svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
-svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
-svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
-svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
-svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
-svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
-svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
-svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
-svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
-svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
-svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
-svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
-svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
-svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
-svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
-svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
-svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
-svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
-svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
-svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
-svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
-svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
-svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
-svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
-svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
-svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
-svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
-svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
-svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
-svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
-svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
-svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
-svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
-svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
-svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
-svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
-svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
-svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
-svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
-svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
-svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
-svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
-svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
-svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
-svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
-svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
-svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
-svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
-svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
-svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
-svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
-svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
-svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
-svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
-svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
-svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
-svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
-svint16_t svhsubr_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
-svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
-svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
-svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
-svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
-svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
-svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
-svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
-svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
-svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
-svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
-svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
-svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
-svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
-svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
-svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
-svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
-svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
-svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
-svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
-svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
-svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
-svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
-svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
-svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
-svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
-svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
-svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
-svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
-svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
-svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
-svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
-svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
-svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
-svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
-svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
-svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
-svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
-svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
-svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
-svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
-svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
-svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
-svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
-svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
-svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
-svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
-svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
-svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
-svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
-svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
-svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
-svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
-svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
-svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
-svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
-svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
-svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
-svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
-svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
-svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
-svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
-svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
-svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
-svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
-svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
-svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
-svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
-svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
-svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
-svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
-svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
-svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
-svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
-svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
-svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
-svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
-svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
-svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
-svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
-svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
-svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
-svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
-svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
-svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
-svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
-svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
-svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
-svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
-svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
-svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
-svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
-svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
-svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
-svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
-svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
-svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
-svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
-svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
-svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
-svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
-svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
-svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
-svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
-svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
-svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
-svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
-svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
-svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
-svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
-svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
-svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
-svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
-svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
-svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
-svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
-svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
-svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
-svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
-svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
-svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
-svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
-svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
-svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
-svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
-svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
-svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
-svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
-svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
-svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
-svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
-svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
-svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
-svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
-svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
-svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
-svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
-svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
-svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
-svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
-svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
-svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
-svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
-svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
-svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
-svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
-svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
-svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
-svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
-svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
-svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
-svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
-svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
-svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
-svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
-svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
-svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
-svint64_t svlogb_f64_x(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
-svint32_t svlogb_f32_x(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
-svint16_t svlogb_f16_x(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
-svint64_t svlogb_f64_z(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
-svint32_t svlogb_f32_z(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
-svint16_t svlogb_f16_z(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
-svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
-svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
-svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
-svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
-svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
-svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
-svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
-svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
-svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
-svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
-svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
-svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
-svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
-svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
-svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
-svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
-svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
-svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
-svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
-svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
-svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
-svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
-svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
-svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
-svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
-svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
-svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
-svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
-svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
-svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
-svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
-svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
-svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
-svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
-svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
-svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
-svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
-svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
-svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
-svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
-svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
-svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
-svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
-svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
-svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
-svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
-svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
-svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
-svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
-svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
-svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
-svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
-svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
-svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
-svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
-svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
-svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
-svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
-svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
-svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
-svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
-svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
-svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
-svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
-svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
-svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
-svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
-svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
-svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
-svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
-svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
-svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
-svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
-svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
-svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
-svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
-svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
-svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
-svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
-svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
-svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
-svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
-svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
-svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
-svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
-svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
-svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
-svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
-svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
-svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
-svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
-svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
-svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
-svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
-svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
-svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
-svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
-svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
-svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
-svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
-svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
-svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
-svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
-svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
-svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
-svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
-svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
-svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
-svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
-svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
-svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
-svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
-svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
-svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
-svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
-svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
-svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
-svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
-svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
-svint64_t svmlslb_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
-svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
-svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
-svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
-svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
-svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
-svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
-svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
-svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
-svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
-svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
-svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
-svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
-svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
-svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
-svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
-svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
-svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
-svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
-svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
-svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
-svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
-svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
-svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
-svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
-svint32_t svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
-svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
-svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
-svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
-svint32_t svmovlb_s32(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
-svint64_t svmovlb_s64(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
-svint16_t svmovlb_s16(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
-svuint32_t svmovlb_u32(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
-svuint64_t svmovlb_u64(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
-svuint16_t svmovlb_u16(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
-svint32_t svmovlt_s32(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
-svint64_t svmovlt_s64(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
-svint16_t svmovlt_s16(svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
-svuint32_t svmovlt_u32(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
-svuint64_t svmovlt_u64(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
-svuint16_t svmovlt_u16(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
-svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
-svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
-svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
-svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
-svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
-svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
-svint32_t svmullb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
-svint64_t svmullb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
-svint16_t svmullb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
-svuint32_t svmullb_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
-svuint64_t svmullb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
-svuint16_t svmullb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
-svint32_t svmullb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
-svint64_t svmullb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
-svint16_t svmullb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
-svuint32_t svmullb_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
-svuint64_t svmullb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
-svuint16_t svmullb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
-svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
-svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
-svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
-svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
-svint32_t svmullt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
-svint64_t svmullt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
-svint16_t svmullt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
-svuint32_t svmullt_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
-svuint64_t svmullt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
-svuint16_t svmullt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
-svint32_t svmullt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
-svint64_t svmullt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
-svint16_t svmullt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
-svuint32_t svmullt_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
-svuint64_t svmullt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
-svuint16_t svmullt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
-svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
-svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
-svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
-svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
-svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
-svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
-svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
-svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
-svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
-svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
-svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
-svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
-svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
-svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
-svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
-svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
-svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
-svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
-svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
-svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
-svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
-svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
-svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
-svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
-svuint8_t svpmul_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
-svuint8_t svpmul_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
-svuint64_t svpmullb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
-svuint16_t svpmullb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
-svuint64_t svpmullb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
-svuint16_t svpmullb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
-svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
-svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
-svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
-svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
-svuint64_t svpmullt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
-svuint16_t svpmullt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
-svuint64_t svpmullt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
-svuint16_t svpmullt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
-svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
-svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
-svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
-svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
-svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
-svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
-svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
-svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
-svint8_t svqabs_s8_x(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
-svint32_t svqabs_s32_x(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
-svint64_t svqabs_s64_x(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
-svint16_t svqabs_s16_x(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
-svint8_t svqabs_s8_z(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
-svint32_t svqabs_s32_z(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
-svint64_t svqabs_s64_z(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
-svint16_t svqabs_s16_z(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
-svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
-svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
-svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) -svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) -svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) -svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) -svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) -svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) -svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) -svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) -svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) -svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) -svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) -svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) -svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) -svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) -svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) -svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) -svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) -svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) -svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) -svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) -svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) -svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) -svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) -svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) -svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) -svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) -svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) -svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) -svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) -svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) -svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) -svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) -svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) -svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) -svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) -svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) -svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) -svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) -svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) -svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) -svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) -svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) -svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) -svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) -svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) -svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) -svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) -svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) -svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) -svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) -svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) -svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) -svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) -svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) -svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) -svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) -svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) -svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) -svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) -svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) -svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) -svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) -svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) -svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) -svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) -svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) -svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) -svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) -svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) -svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) -svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) -svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) -svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) -svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) -svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) -svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) -svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) -svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) -svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) -svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) -svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) -svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) -svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) -svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) -svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) -svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) -svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) -svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) -svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) -svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) -svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) -svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) -svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) -svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) -svint8_t svqdmulh_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) -svint32_t svqdmulh_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) -svint64_t svqdmulh_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) -svint16_t svqdmulh_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) -svint8_t svqdmulh_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) -svint32_t svqdmulh_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) -svint64_t svqdmulh_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) -svint16_t svqdmulh_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) -svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) -svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, 
uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) -svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) -svint32_t svqdmullb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) -svint64_t svqdmullb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) -svint16_t svqdmullb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) -svint32_t svqdmullb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) -svint64_t svqdmullb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) -svint16_t svqdmullb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) -svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) -svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) -svint32_t svqdmullt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) -svint64_t svqdmullt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) -svint16_t svqdmullt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) -svint32_t svqdmullt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) -svint64_t svqdmullt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) -svint16_t svqdmullt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) -svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) -svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) -svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) -svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) -svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) -svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) -svint8_t svqneg_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) -svint32_t svqneg_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) -svint64_t svqneg_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) -svint16_t svqneg_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) -svint8_t svqneg_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) -svint32_t svqneg_s32_z(svbool_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) -svint64_t svqneg_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) -svint16_t svqneg_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) -svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) -svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) -svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) -svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) -svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) -svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) -svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) -svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) -svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) -svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) -svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) -svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) -svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) -svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) -svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) -svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) -svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) -svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) -svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) -svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) -svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) -svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) -svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) -svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) -svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) -svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) -svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) -svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) -svint8_t svqrdmulh_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) -svint32_t svqrdmulh_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) -svint64_t svqrdmulh_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) -svint16_t svqrdmulh_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) -svint8_t svqrdmulh_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) -svint32_t svqrdmulh_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) -svint64_t svqrdmulh_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) -svint16_t svqrdmulh_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) -svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) -svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) -svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) -svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) -svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) -svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) -svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) -svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) -svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) -svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) -svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) -svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) -svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) -svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) -svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) -svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) -svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) -svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) -svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) -svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) -svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) -svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) -svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) -svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) -svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) -svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) -svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) -svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) -svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) -svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) -svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) -svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) -svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) -svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) -svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) -svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) -svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) -svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) -svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) -svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) -svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) -svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) -svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) -svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) -svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) -svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) -svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) -svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) -svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) -svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) -svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) -svint16_t svqrshrnb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) -svint32_t svqrshrnb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) -svint8_t svqrshrnb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) -svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) -svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) -svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) -svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) -svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) -svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) -svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) -svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) -svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) -svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) -svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) -svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) -svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) -svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) -svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) -svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) -svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) -svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) -svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) -svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) -svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) -svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) -svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) -svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) -svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) -svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) -svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) -svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) -svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) -svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) -svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) -svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) -svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) -svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) -svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) -svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) -svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) -svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) -svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) -svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) -svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) -svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) -svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) -svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) -svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) -svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) -svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) -svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) -svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) -svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) -svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) -svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) -svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) -svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) -svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) -svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) -svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) -svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) -svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) -svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) -svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) -svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) -svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) -svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) -svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) -svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) -svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) -svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) -svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) -svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) -svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) -svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) -svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) -svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) -svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) -svint16_t svqshrnb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) -svint32_t svqshrnb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) -svint8_t svqshrnb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) -svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) -svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) -svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) -svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) -svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) -svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) -svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) -svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) -svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) -svuint16_t svqshrunb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) -svuint32_t svqshrunb_n_s64(svint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) -svuint8_t svqshrunb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) -svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) -svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) -svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) -svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) -svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) -svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) -svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) -svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) -svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) -svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) -svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) -svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) -svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) -svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) -svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) -svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) -svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) -svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) -svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) -svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) -svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) -svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) -svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) -svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) -svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
-svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
-svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
-svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
-svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
-svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
-svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
-svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
-svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
-svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
-svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
-svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
-svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
-svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
-svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
-svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
-svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
-svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
-svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
-svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
-svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
-svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
-svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
-svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
-svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
-svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
-svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
-svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
-svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
-svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
-svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
-svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
-svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
-svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
-svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
-svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
-svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
-svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
-svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
-svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
-svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
-svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
-svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
-svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
-svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
-svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
-svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
-svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
-svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
-svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
-svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
-svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
-svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
-svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
-svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
-svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
-svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
-svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
-svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
-svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
-svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
-svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
-svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
-svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
-svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
-svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
-svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
-svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
-svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
-svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
-svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
-svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
-svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
-svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
-svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
-svint16_t svqxtnb_s32(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
-svint32_t svqxtnb_s64(svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
-svint8_t svqxtnb_s16(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
-svuint16_t svqxtnb_u32(svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
-svuint32_t svqxtnb_u64(svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
-svuint8_t svqxtnb_u16(svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
-svint16_t svqxtnt_s32(svint16_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
-svint32_t svqxtnt_s64(svint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
-svint8_t svqxtnt_s16(svint8_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
-svuint16_t svqxtnt_u32(svuint16_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
-svuint32_t svqxtnt_u64(svuint32_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
-svuint8_t svqxtnt_u16(svuint8_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
-svuint16_t svqxtunb_s32(svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
-svuint32_t svqxtunb_s64(svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
-svuint8_t svqxtunb_s16(svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
-svuint16_t svqxtunt_s32(svuint16_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
-svuint32_t svqxtunt_s64(svuint32_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
-svuint8_t svqxtunt_s16(svuint8_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
-svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
-svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
-svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
-svint16_t svraddhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
-svint32_t svraddhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
-svint8_t svraddhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
-svuint16_t svraddhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
-svuint32_t svraddhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
-svuint8_t svraddhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
-svint16_t svraddhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
-svint32_t svraddhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
-svint8_t svraddhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
-svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
-svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
-svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
-svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
-svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
-svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
-svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
-svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
-svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
-svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
-svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
-svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
-svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
-svuint32_t svrecpe_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
-svuint32_t svrecpe_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
-svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
-svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
-svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
-svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
-svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
-svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
-svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
-svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
-svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
-svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
-svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
-svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
-svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
-svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
-svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
-svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
-svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
-svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
-svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
-svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
-svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
-svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
-svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
-svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
-svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
-svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
-svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
-svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
-svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
-svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
-svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
-svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
-svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
-svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
-svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
-svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
-svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
-svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
-svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
-svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
-svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
-svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
-svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
-svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
-svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
-svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
-svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
-svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
-svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
-svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
-svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
-svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
-svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
-svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
-svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
-svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
-svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
-svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
-svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
-svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
-svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
-svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
-svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
-svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
-svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
-svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
-svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
-svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
-svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
-svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
-svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
-svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
-svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
-svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
-svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
-svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
-svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
-svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
-svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
-svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
-svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
-svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
-svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
-svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
-svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
-svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
-svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
-svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
-svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
-svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
-svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
-svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
-svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
-svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
-svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
-svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
-svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
-svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
-svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
-svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
-svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
-svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
-svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
-svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
-svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
-svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
-svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
-svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
-svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
-svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
-svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
-svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
-svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
-svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
-svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
-svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
-svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
-svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
-svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
-svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
-svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
-svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
-svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
-svint16_t svrshrnb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
-svint32_t svrshrnb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
-svint8_t svrshrnb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
-svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
-svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
-svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
-svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
-svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
-svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
-svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
-svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
-svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
-svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
-svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
-svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
-svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
-svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
-svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
-svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
-svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
-svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
-svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
-svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
-svint16_t svrsubhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
-svint32_t svrsubhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
-svint8_t svrsubhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
-svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
-svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
-svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
-svint16_t svrsubhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
-svint32_t svrsubhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
-svint8_t svrsubhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
-svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
-svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
-svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
-svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
-svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
-svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
-svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
-svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
-svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
-svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
-svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
-svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
-svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
-svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
-svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
-svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
-svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
-svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
-svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
-svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
-svint32_t svshllb_n_s32(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
-svint64_t svshllb_n_s64(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
-svint16_t svshllb_n_s16(svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
-svuint32_t svshllb_n_u32(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
-svuint64_t svshllb_n_u64(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
-svuint16_t svshllb_n_u16(svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
-svint32_t svshllt_n_s32(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
-svint64_t svshllt_n_s64(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
-svint16_t svshllt_n_s16(svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
-svuint32_t svshllt_n_u32(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
-svuint64_t svshllt_n_u64(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
-svuint16_t svshllt_n_u16(svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
-svuint16_t svshrnb_n_u32(svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
-svuint32_t svshrnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
-svuint8_t svshrnb_n_u16(svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
-svint16_t svshrnb_n_s32(svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
-svint32_t svshrnb_n_s64(svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
-svint8_t svshrnb_n_s16(svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
-svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
-svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
-svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
-svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
-svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
-svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
-svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
-svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
-svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
-svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
-svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
-svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
-svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
-svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
-svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
-svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
-svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
-svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
-svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
-svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
-svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
-svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
-svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
-svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
-svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
-svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
-svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
-svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
-svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
-svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
-svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
-svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
-svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
-svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
-svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
-svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
-svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
-svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
-svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
-svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
-svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
-svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
-svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
-svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
-svuint64_t svsra_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
-svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
-svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
-svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
-svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
-svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
-svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
-svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
-svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
-svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
-void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
-void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
-void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
-void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
-void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
-void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
-void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
-void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
-void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
-void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
-void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
-void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
-void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
-void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
-void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
-void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
-void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
-void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
-void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
-void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
-void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
-void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
-void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
-void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
-void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
-void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
-void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
-void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
-void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
-void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
-void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
-void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
-void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
-void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
-void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
-void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
-void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
-void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
-void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
-void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
-void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
-void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
-void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
-void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
-void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
-void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
-void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
-void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
-void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
-void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
-void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
-void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
-void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
-void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
-void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
-void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
-void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
-void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
-void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
-void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
-void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
-void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
-void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
-void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
-void svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
-void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
-void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
-void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
-void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
-void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
-void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
-void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
-void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
-void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
-void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
-void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
-void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
-void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
-void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
-void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
-void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
-void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
-void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
-svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
-svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
-svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
-svint16_t svsubhnb_n_s32(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
-svint32_t svsubhnb_n_s64(svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
-svint8_t svsubhnb_n_s16(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
-svuint16_t svsubhnb_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
-svuint32_t svsubhnb_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
-svuint8_t svsubhnb_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
-svint16_t svsubhnb_s32(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
-svint32_t svsubhnb_s64(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
-svint8_t svsubhnb_s16(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
-svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
-svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
-svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
-svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
-svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
-svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
-svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
-svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
-svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
-svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
-svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
-svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
-svint32_t svsublb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
-svint64_t svsublb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
-svint16_t svsublb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
-svuint32_t svsublb_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
-svuint64_t svsublb_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
-svuint16_t svsublb_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
-svint32_t svsublb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
-svint64_t svsublb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
-svint16_t svsublb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
-svuint32_t svsublb_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
-svuint64_t svsublb_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
-svuint16_t svsublb_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
-svint32_t svsublbt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
-svint64_t svsublbt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
-svint16_t svsublbt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
-svint32_t svsublbt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
-svint64_t svsublbt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
-svint16_t svsublbt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
-svint32_t svsublt_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
-svint64_t svsublt_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
-svint16_t svsublt_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
-svuint32_t svsublt_n_u32(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
-svuint64_t svsublt_n_u64(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
-svuint16_t svsublt_n_u16(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
-svint32_t svsublt_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
-svint64_t svsublt_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
-svint16_t svsublt_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
-svuint32_t svsublt_u32(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
-svuint64_t svsublt_u64(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
-svuint16_t svsublt_u16(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
-svint32_t svsubltb_n_s32(svint16_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
-svint64_t svsubltb_n_s64(svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
-svint16_t svsubltb_n_s16(svint8_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
-svint32_t svsubltb_s32(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
-svint64_t svsubltb_s64(svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
-svint16_t svsubltb_s16(svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
-svint32_t svsubwb_n_s32(svint32_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
-svint64_t svsubwb_n_s64(svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
-svint16_t svsubwb_n_s16(svint16_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
-svuint32_t svsubwb_n_u32(svuint32_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
-svuint64_t svsubwb_n_u64(svuint64_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
-svuint16_t svsubwb_n_u16(svuint16_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
-svint32_t svsubwb_s32(svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
-svint64_t svsubwb_s64(svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
-svint16_t svsubwb_s16(svint16_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
-svuint32_t svsubwb_u32(svuint32_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
-svuint64_t svsubwb_u64(svuint64_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
-svuint16_t svsubwb_u16(svuint16_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
-svint32_t svsubwt_n_s32(svint32_t, int16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
-svint64_t svsubwt_n_s64(svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
-svint16_t svsubwt_n_s16(svint16_t, int8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
-svuint32_t svsubwt_n_u32(svuint32_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
-svuint64_t svsubwt_n_u64(svuint64_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
-svuint16_t svsubwt_n_u16(svuint16_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
-svint32_t svsubwt_s32(svint32_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
-svint64_t svsubwt_s64(svint64_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
-svint16_t svsubwt_s16(svint16_t, svint8_t);
-__ai
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) -svuint32_t svsubwt_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) -svuint64_t svsubwt_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) -svuint16_t svsubwt_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) -svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) -svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) -svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) -svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) -svint8_t svtbl2_s8(svint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) -svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) -svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) -svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) -svint32_t svtbl2_s32(svint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) -svint64_t svtbl2_s64(svint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) -svint16_t svtbl2_s16(svint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) -svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) -svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) -svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) -svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) -svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) -svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) -svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) -svfloat16_t svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) -svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) -svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) -svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) -svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) -svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) -svint64_t 
svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) -svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) -svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) -svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) -svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) -svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) -svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) -svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) -svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) -svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) -svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) -svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) -svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) -svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) -svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) -svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) -svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) -svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) -svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) -svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) -svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) -svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) -svbool_t svwhilege_b8_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) -svbool_t svwhilege_b32_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) -svbool_t svwhilege_b64_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) -svbool_t svwhilege_b16_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) -svbool_t svwhilege_b8_s64(int64_t, 
int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) -svbool_t svwhilege_b32_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) -svbool_t svwhilege_b64_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) -svbool_t svwhilege_b16_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) -svbool_t svwhilege_b8_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) -svbool_t svwhilege_b32_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) -svbool_t svwhilege_b64_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) -svbool_t svwhilege_b16_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) -svbool_t svwhilege_b8_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) -svbool_t svwhilege_b32_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) -svbool_t svwhilege_b64_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) -svbool_t svwhilege_b16_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) -svbool_t svwhilegt_b8_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) -svbool_t svwhilegt_b32_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) -svbool_t svwhilegt_b64_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) -svbool_t svwhilegt_b16_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) -svbool_t svwhilegt_b8_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) -svbool_t svwhilegt_b32_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) -svbool_t svwhilegt_b64_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) -svbool_t svwhilegt_b16_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) -svbool_t svwhilegt_b8_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) -svbool_t svwhilegt_b32_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) -svbool_t svwhilegt_b64_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) -svbool_t svwhilegt_b16_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) -svbool_t svwhilegt_b8_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) -svbool_t svwhilegt_b32_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) -svbool_t svwhilegt_b64_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) -svbool_t svwhilegt_b16_u64(uint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) -svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) -svbool_t svwhilerw_s8(int8_t const *, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) -svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) -svbool_t svwhilerw_f64(float64_t const *, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) -svbool_t svwhilerw_s64(int64_t const *, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) -svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) -svbool_t svwhilerw_f16(float16_t const *, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) -svbool_t svwhilerw_s16(int16_t const *, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) -svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) -svbool_t svwhilerw_f32(float32_t const *, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) -svbool_t svwhilerw_s32(int32_t const *, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) -svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) -svbool_t svwhilewr_s8(int8_t const *, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) -svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) -svbool_t svwhilewr_f64(float64_t const *, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) -svbool_t svwhilewr_s64(int64_t const *, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) -svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) -svbool_t svwhilewr_f16(float16_t const *, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) -svbool_t svwhilewr_s16(int16_t const *, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) -svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) -svbool_t svwhilewr_f32(float32_t const *, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) -svbool_t svwhilewr_s32(int32_t const *, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) -svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) -svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) -svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) -svuint16_t svxar_n_u16(svuint16_t, 
svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) -svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) -svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) -svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) -svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) -svint8_t svaba(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) -svint32_t svaba(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) -svint64_t svaba(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) -svint16_t svaba(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) -svuint8_t svaba(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) -svuint32_t svaba(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) -svuint64_t svaba(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) -svuint16_t svaba(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) -svint8_t svaba(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) -svint32_t svaba(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) -svint64_t svaba(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) -svint16_t svaba(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) -svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) -svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) -svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) -svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) -svint32_t svabalb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) -svint64_t svabalb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) -svint16_t svabalb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) -svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) -svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) -svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) -svint32_t svabalb(svint32_t, svint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) -svint64_t svabalb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) -svint16_t svabalb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) -svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) -svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) -svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) -svint32_t svabalt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) -svint64_t svabalt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) -svint16_t svabalt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) -svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) -svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) -svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) -svint32_t svabalt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) -svint64_t svabalt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) -svint16_t svabalt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) -svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) -svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) -svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) -svint32_t svabdlb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) -svint64_t svabdlb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) -svint16_t svabdlb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) -svuint32_t svabdlb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) -svuint64_t svabdlb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) -svuint16_t svabdlb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) -svint32_t svabdlb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) -svint64_t svabdlb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) -svint16_t svabdlb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) -svuint32_t svabdlb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) -svuint64_t svabdlb(svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) -svuint16_t svabdlb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) -svint32_t svabdlt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) -svint64_t svabdlt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) -svint16_t svabdlt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) -svuint32_t svabdlt(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) -svuint64_t svabdlt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) -svuint16_t svabdlt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) -svint32_t svabdlt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) -svint64_t svabdlt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) -svint16_t svabdlt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) -svuint32_t svabdlt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) -svuint64_t svabdlt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) -svuint16_t svabdlt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) -svint32_t svadalp_m(svbool_t, svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) -svint64_t svadalp_m(svbool_t, svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) -svint16_t svadalp_m(svbool_t, svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) -svint32_t svadalp_x(svbool_t, svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) -svint64_t svadalp_x(svbool_t, svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) -svint16_t svadalp_x(svbool_t, svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) -svint32_t svadalp_z(svbool_t, svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) -svint64_t svadalp_z(svbool_t, svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) -svint16_t svadalp_z(svbool_t, svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) -svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) -svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) -svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) -svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) -svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) -svuint16_t svadalp_x(svbool_t, svuint16_t, 
svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) -svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) -svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) -svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) -svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) -svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) -svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) -svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) -svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) -svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) -svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) -svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) -svuint16_t svaddhnb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) -svuint32_t svaddhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) -svuint8_t svaddhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) -svint16_t svaddhnb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) -svint32_t svaddhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) -svint8_t svaddhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) -svuint16_t svaddhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) -svuint32_t svaddhnb(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) -svuint8_t svaddhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) -svint16_t svaddhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) -svint32_t svaddhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) -svint8_t svaddhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) -svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) -svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) -svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) -svint16_t svaddhnt(svint16_t, svint32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) -svint32_t svaddhnt(svint32_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) -svint8_t svaddhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) -svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) -svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) -svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) -svint16_t svaddhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) -svint32_t svaddhnt(svint32_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) -svint8_t svaddhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) -svint32_t svaddlb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) -svint64_t svaddlb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) -svint16_t svaddlb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) -svuint32_t svaddlb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) -svuint64_t svaddlb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) -svuint16_t svaddlb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) -svint32_t svaddlb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) -svint64_t svaddlb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) -svint16_t svaddlb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) -svuint32_t svaddlb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) -svuint64_t svaddlb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) -svuint16_t svaddlb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) -svint32_t svaddlbt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) -svint64_t svaddlbt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) -svint16_t svaddlbt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) -svint32_t svaddlbt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) -svint64_t svaddlbt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) -svint16_t svaddlbt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) -svint32_t svaddlt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) -svint64_t svaddlt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) -svint16_t 
svaddlt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) -svuint32_t svaddlt(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) -svuint64_t svaddlt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) -svuint16_t svaddlt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) -svint32_t svaddlt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) -svint64_t svaddlt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) -svint16_t svaddlt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) -svuint32_t svaddlt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) -svuint64_t svaddlt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) -svuint16_t svaddlt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) -svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) -svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) -svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) -svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) -svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) -svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) -svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) -svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) -svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) -svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) -svint8_t svaddp_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) -svint32_t svaddp_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) -svint64_t svaddp_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) -svint16_t svaddp_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) -svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) -svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) -svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) -svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) -svint8_t svaddp_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) -svint32_t svaddp_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) -svint64_t svaddp_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) -svint16_t svaddp_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) -svint32_t svaddwb(svint32_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) -svint64_t svaddwb(svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) -svint16_t svaddwb(svint16_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) -svuint32_t svaddwb(svuint32_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) -svuint64_t svaddwb(svuint64_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) -svuint16_t svaddwb(svuint16_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) -svint32_t svaddwb(svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) -svint64_t svaddwb(svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) -svint16_t svaddwb(svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) -svuint32_t svaddwb(svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) -svuint64_t svaddwb(svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) -svuint16_t svaddwb(svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) -svint32_t svaddwt(svint32_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) -svint64_t svaddwt(svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) -svint16_t svaddwt(svint16_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) -svuint32_t svaddwt(svuint32_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) -svuint64_t svaddwt(svuint64_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) -svuint16_t svaddwt(svuint16_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) -svint32_t svaddwt(svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) -svint64_t svaddwt(svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) -svint16_t svaddwt(svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) -svuint32_t svaddwt(svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) -svuint64_t svaddwt(svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) -svuint16_t svaddwt(svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) -svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) -svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) -svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) -svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) -svint8_t svbcax(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) -svint32_t svbcax(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) -svint64_t svbcax(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) -svint16_t svbcax(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) -svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) -svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) -svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) -svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) -svint8_t svbcax(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) -svint32_t svbcax(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) -svint64_t svbcax(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) -svint16_t svbcax(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) -svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) -svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) -svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) -svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) -svint8_t svbsl1n(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) -svint32_t svbsl1n(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) -svint64_t svbsl1n(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) -svint16_t svbsl1n(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) -svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) -svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) -svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) -svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) -svint8_t svbsl1n(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) -svint32_t svbsl1n(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) -svint64_t svbsl1n(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) -svint16_t svbsl1n(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) -svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) -svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) -svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) -svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) -svint8_t svbsl2n(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) -svint32_t svbsl2n(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) -svint64_t svbsl2n(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) -svint16_t svbsl2n(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) -svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) -svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) -svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) -svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) -svint8_t svbsl2n(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) -svint32_t svbsl2n(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) -svint64_t svbsl2n(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) -svint16_t svbsl2n(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) -svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) -svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) -svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) -svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) -svint8_t svbsl(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) -svint32_t svbsl(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) -svint64_t svbsl(svint64_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
-svint16_t svbsl(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
-svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
-svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
-svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
-svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
-svint8_t svbsl(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
-svint32_t svbsl(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
-svint64_t svbsl(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
-svint16_t svbsl(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
-svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
-svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
-svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
-svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
-svint8_t svcadd(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
-svint32_t svcadd(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
-svint64_t svcadd(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
-svint16_t svcadd(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
-svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
-svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
-svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
-svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
-svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
-svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
-svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
-svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
-svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
-svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
-svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
-svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
-svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
-svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
-svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
-svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
-svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
-svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
-svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
-svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
-svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
-svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
-svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
-svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
-svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
-svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
-svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
-svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
-svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
-svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
-svint8_t sveor3(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
-svint32_t sveor3(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
-svint64_t sveor3(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
-svint16_t sveor3(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
-svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
-svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
-svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
-svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
-svint8_t sveor3(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
-svint32_t sveor3(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
-svint64_t sveor3(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
-svint16_t sveor3(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
-svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
-svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
-svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
-svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
-svint8_t sveorbt(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
-svint32_t sveorbt(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
-svint64_t sveorbt(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
-svint16_t sveorbt(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
-svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
-svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
-svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
-svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
-svint8_t sveorbt(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
-svint32_t sveorbt(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
-svint64_t sveorbt(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
-svint16_t sveorbt(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
-svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
-svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
-svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
-svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
-svint8_t sveortb(svint8_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
-svint32_t sveortb(svint32_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
-svint64_t sveortb(svint64_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
-svint16_t sveortb(svint16_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
-svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
-svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
-svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
-svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
-svint8_t sveortb(svint8_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
-svint32_t sveortb(svint32_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
-svint64_t sveortb(svint64_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
-svint16_t sveortb(svint16_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
-svint8_t svhadd_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
-svint32_t svhadd_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
-svint64_t svhadd_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
-svint16_t svhadd_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
-svint8_t svhadd_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
-svint32_t svhadd_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
-svint64_t svhadd_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
-svint16_t svhadd_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
-svint8_t svhadd_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
-svint32_t svhadd_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
-svint64_t svhadd_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
-svint16_t svhadd_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
-svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
-svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
-svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
-svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
-svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
-svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
-svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
-svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
-svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
-svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
-svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
-svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
-svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
-svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
-svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
-svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
-svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
-svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
-svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
-svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
-svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
-svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
-svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
-svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
-svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
-svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
-svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
-svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
-svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
-svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
-svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
-svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
-svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
-svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
-svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
-svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
-svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
-svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
-svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
-svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
-svuint8_t svhistseg(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
-svuint8_t svhistseg(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
-svint8_t svhsub_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
-svint32_t svhsub_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
-svint64_t svhsub_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
-svint16_t svhsub_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
-svint8_t svhsub_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
-svint32_t svhsub_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
-svint64_t svhsub_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
-svint16_t svhsub_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
-svint8_t svhsub_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
-svint32_t svhsub_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
-svint64_t svhsub_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
-svint16_t svhsub_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
-svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
-svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
-svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
-svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
-svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
-svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
-svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
-svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
-svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
-svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
-svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
-svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
-svint8_t svhsub_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
-svint32_t svhsub_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
-svint64_t svhsub_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
-svint16_t svhsub_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
-svint8_t svhsub_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
-svint32_t svhsub_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
-svint64_t svhsub_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
-svint16_t svhsub_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
-svint8_t svhsub_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
-svint32_t svhsub_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
-svint64_t svhsub_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
-svint16_t svhsub_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
-svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
-svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
-svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
-svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
-svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
-svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
-svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
-svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
-svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
-svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
-svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
-svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
-svint8_t svhsubr_m(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
-svint32_t svhsubr_m(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
-svint64_t svhsubr_m(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
-svint16_t svhsubr_m(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
-svint8_t svhsubr_x(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
-svint32_t svhsubr_x(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
-svint64_t svhsubr_x(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
-svint16_t svhsubr_x(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
-svint8_t svhsubr_z(svbool_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
-svint32_t svhsubr_z(svbool_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
-svint64_t svhsubr_z(svbool_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
-svint16_t svhsubr_z(svbool_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
-svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
-svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
-svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
-svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
-svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
-svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
-svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
-svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
-svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
-svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
-svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
-svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
-svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
-svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
-svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
-svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
-svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
-svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
-svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
-svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
-svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
-svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
-svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
-svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
-svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
-svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
-svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
-svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
-svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
-svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
-svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
-svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
-svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
-svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
-svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
-svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
-svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
-svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
-svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
-svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
-svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
-svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
-svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
-svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
-svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
-svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
-svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
-svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
-svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
-svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
-svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
-svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
-svint32_t svldnt1_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
-svint64_t svldnt1_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
-svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
-svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
-svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
-svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
-svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
-svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
-svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
-svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
-svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
-svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
-svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
-svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
-svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
-svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
-svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
-svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
-svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
-svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
-svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
-svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
-svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
-svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
-svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
-svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
-svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
-svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
-svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
-svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
-svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
-svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
-svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
-svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
-svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
-svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
-svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
-svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
-svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
-svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
-svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
-svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
-svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
-svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
-svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
-svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
-svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
-svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
-svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
-svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
-svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
-svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
-svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
-svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
-svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
-svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
-svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
-svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
-svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
-svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
-svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
-svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
-svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
-svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
-svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
-svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
-svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
-svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
-svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
-svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
-svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
-svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
-svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
-svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
-svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
-svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
-svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
-svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
-svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
-svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
-svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
-svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
-svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
-svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
-svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
-svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
-svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
-svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
-svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
-svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
-svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
-svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
-svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
-svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
-svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
-svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
-svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
-svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
-svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
-svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
-svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
-svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
-svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
-svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
-svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
-svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
-svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
-svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
-svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
-svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
-svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
-svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
-svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
-svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
-svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
-svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
-svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
-svint64_t svlogb_x(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
-svint32_t svlogb_x(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
-svint16_t svlogb_x(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
-svint64_t svlogb_z(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
-svint32_t svlogb_z(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
-svint16_t svlogb_z(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
-svbool_t svmatch(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
-svbool_t svmatch(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
-svbool_t svmatch(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
-svbool_t svmatch(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
-svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
-svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
-svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
-svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
-svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
-svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
-svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
-svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
-svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
-svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
-svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
-svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
-svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
-svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
-svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
-svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
-svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
-svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
-svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
-svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
-svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
-svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
-svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
-svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
-svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
-svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
-svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
-svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
-svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
-svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
-svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
-svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
-svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
-svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
-svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
-svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
-svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
-svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
-svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
-svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
-svint8_t svminp_m(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
-svint32_t svminp_m(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
-svint64_t svminp_m(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
-svint16_t svminp_m(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
-svint8_t svminp_x(svbool_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
-svint32_t svminp_x(svbool_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
-svint64_t svminp_x(svbool_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
-svint16_t svminp_x(svbool_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
-svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
-svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
-svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
-svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
-svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
-svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
-svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
-svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
-svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
-svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
-svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
-svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
-svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
-svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
-svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
-svint32_t svmlalb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
-svint64_t svmlalb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
-svint16_t svmlalb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
-svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
-svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
-svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
-svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
-svint32_t svmlalb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
-svint64_t svmlalb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
-svint16_t svmlalb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
-svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
-svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
-svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
-svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
-svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
-svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
-svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
-svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
-svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
-svint32_t svmlalt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
-svint64_t svmlalt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
-svint16_t svmlalt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
-svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
-svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
-svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
-svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
-svint32_t svmlalt(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
-svint64_t svmlalt(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
-svint16_t svmlalt(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
-svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
-svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
-svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
-svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
-svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
-svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
-svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
-svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
-svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
-svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
-svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
-svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
-svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
-svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
-svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
-svint32_t svmlslb(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
-svint64_t svmlslb(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
-svint16_t svmlslb(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
-svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
-svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
-svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
-svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
-svint32_t svmlslb(svint32_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
-svint64_t svmlslb(svint64_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
-svint16_t svmlslb(svint16_t, svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
-svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
-svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
-svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
-svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
-svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
-svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
-svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
-svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
-svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
-svint32_t svmlslt(svint32_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
-svint64_t svmlslt(svint64_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
-svint16_t svmlslt(svint16_t, svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
-svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
-svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
-svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
-svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);
-__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) -svint32_t svmlslt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) -svint64_t svmlslt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) -svint16_t svmlslt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) -svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) -svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) -svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) -svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) -svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) -svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) -svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) -svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) -svint32_t svmovlb(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) -svint64_t svmovlb(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) -svint16_t svmovlb(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) -svuint32_t svmovlb(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) -svuint64_t svmovlb(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) -svuint16_t svmovlb(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) -svint32_t svmovlt(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) -svint64_t svmovlt(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) -svint16_t svmovlt(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) -svuint32_t svmovlt(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) -svuint64_t svmovlt(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) -svuint16_t svmovlt(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) -svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) -svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) -svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) -svint32_t svmul_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) -svint64_t svmul_lane(svint64_t, svint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) -svint16_t svmul_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) -svint32_t svmullb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) -svint64_t svmullb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) -svint16_t svmullb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) -svuint32_t svmullb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) -svuint64_t svmullb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) -svuint16_t svmullb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) -svint32_t svmullb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) -svint64_t svmullb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) -svint16_t svmullb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) -svuint32_t svmullb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) -svuint64_t svmullb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) -svuint16_t svmullb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) -svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) -svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) -svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) -svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) -svint32_t svmullt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) -svint64_t svmullt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) -svint16_t svmullt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) -svuint32_t svmullt(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) -svuint64_t svmullt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) -svuint16_t svmullt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) -svint32_t svmullt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) -svint64_t svmullt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) -svint16_t svmullt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) -svuint32_t svmullt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) -svuint64_t svmullt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) -svuint16_t svmullt(svuint8_t, 
svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) -svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) -svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) -svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) -svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) -svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) -svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) -svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) -svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) -svint8_t svnbsl(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) -svint32_t svnbsl(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) -svint64_t svnbsl(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) -svint16_t svnbsl(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) -svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) -svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) -svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) -svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) -svint8_t svnbsl(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) -svint32_t svnbsl(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) -svint64_t svnbsl(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) -svint16_t svnbsl(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) -svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) -svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) -svbool_t svnmatch(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) -svbool_t svnmatch(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) -svuint8_t svpmul(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) -svuint8_t svpmul(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) -svuint64_t svpmullb(svuint32_t, uint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) -svuint16_t svpmullb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) -svuint64_t svpmullb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) -svuint16_t svpmullb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) -svuint8_t svpmullb_pair(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) -svuint32_t svpmullb_pair(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) -svuint8_t svpmullb_pair(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) -svuint32_t svpmullb_pair(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) -svuint64_t svpmullt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) -svuint16_t svpmullt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) -svuint64_t svpmullt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) -svuint16_t svpmullt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) -svuint8_t svpmullt_pair(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) -svuint32_t svpmullt_pair(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) -svuint8_t svpmullt_pair(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) -svuint32_t svpmullt_pair(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) -svint8_t svqabs_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) -svint32_t svqabs_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) -svint64_t svqabs_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) -svint16_t svqabs_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) -svint8_t svqabs_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) -svint32_t svqabs_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) -svint64_t svqabs_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) -svint16_t svqabs_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) -svint8_t svqabs_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) -svint32_t svqabs_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) -svint64_t svqabs_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) -svint16_t svqabs_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) -svint8_t svqadd_m(svbool_t, svint8_t, int8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) -svint32_t svqadd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) -svint64_t svqadd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) -svint16_t svqadd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) -svint8_t svqadd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) -svint32_t svqadd_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) -svint64_t svqadd_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) -svint16_t svqadd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) -svint8_t svqadd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) -svint32_t svqadd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) -svint64_t svqadd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) -svint16_t svqadd_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) -svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) -svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) -svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) -svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) -svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) -svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) -svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) -svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) -svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) -svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) -svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) -svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) -svint8_t svqadd_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) -svint32_t svqadd_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) -svint64_t svqadd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) -svint16_t svqadd_m(svbool_t, svint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) -svint8_t svqadd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) -svint32_t svqadd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) -svint64_t svqadd_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) -svint16_t svqadd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) -svint8_t svqadd_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) -svint32_t svqadd_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) -svint64_t svqadd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) -svint16_t svqadd_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) -svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) -svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) -svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) -svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) -svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) -svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) -svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) -svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) -svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) -svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) -svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) -svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) -svint8_t svqcadd(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) -svint32_t svqcadd(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) -svint64_t svqcadd(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) -svint16_t svqcadd(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) -svint32_t svqdmlalb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) -svint64_t svqdmlalb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) -svint16_t svqdmlalb(svint16_t, svint8_t, int8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) -svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) -svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) -svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) -svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) -svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) -svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) -svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) -svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) -svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) -svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) -svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) -svint32_t svqdmlalt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) -svint64_t svqdmlalt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) -svint16_t svqdmlalt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) -svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) -svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) -svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) -svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) -svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) -svint32_t svqdmlslb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) -svint64_t svqdmlslb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) -svint16_t svqdmlslb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) -svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) -svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) -svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) -svint32_t svqdmlslb_lane(svint32_t, svint16_t, 
svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) -svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) -svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) -svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) -svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) -svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) -svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) -svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) -svint32_t svqdmlslt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) -svint64_t svqdmlslt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) -svint16_t svqdmlslt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) -svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) -svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) -svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) -svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) -svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) -svint8_t svqdmulh(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) -svint32_t svqdmulh(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) -svint64_t svqdmulh(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) -svint16_t svqdmulh(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) -svint8_t svqdmulh(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) -svint32_t svqdmulh(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) -svint64_t svqdmulh(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) -svint16_t svqdmulh(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) -svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) -svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) -svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) -svint32_t svqdmullb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) -svint64_t svqdmullb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) -svint16_t svqdmullb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) -svint32_t svqdmullb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) -svint64_t svqdmullb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) -svint16_t svqdmullb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) -svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) -svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) -svint32_t svqdmullt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) -svint64_t svqdmullt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) -svint16_t svqdmullt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) -svint32_t svqdmullt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) -svint64_t svqdmullt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) -svint16_t svqdmullt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) -svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) -svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) -svint8_t svqneg_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) -svint32_t svqneg_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) -svint64_t svqneg_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) -svint16_t svqneg_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) -svint8_t svqneg_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) -svint32_t svqneg_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) -svint64_t svqneg_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) -svint16_t svqneg_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) -svint8_t svqneg_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) -svint32_t svqneg_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) -svint64_t svqneg_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) -svint16_t svqneg_z(svbool_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) -svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) -svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) -svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) -svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) -svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) -svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) -svint8_t svqrdmlah(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) -svint32_t svqrdmlah(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) -svint64_t svqrdmlah(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) -svint16_t svqrdmlah(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) -svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) -svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) -svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) -svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) -svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) -svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) -svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) -svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) -svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) -svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) -svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) -svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) -svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) -svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) -svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) -svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) -svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) -svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) -svint8_t svqrdmulh(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) -svint32_t svqrdmulh(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) -svint64_t svqrdmulh(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) -svint16_t svqrdmulh(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) -svint8_t svqrdmulh(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) -svint32_t svqrdmulh(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) -svint64_t svqrdmulh(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) -svint16_t svqrdmulh(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) -svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) -svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) -svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) -svint8_t svqrshl_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) -svint32_t svqrshl_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) -svint64_t svqrshl_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) -svint16_t svqrshl_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) -svint8_t svqrshl_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) -svint32_t svqrshl_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) -svint64_t svqrshl_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) -svint16_t svqrshl_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) -svint8_t svqrshl_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) -svint32_t svqrshl_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) -svint64_t svqrshl_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) -svint16_t svqrshl_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) -svuint8_t svqrshl_m(svbool_t, 
svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) -svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) -svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) -svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) -svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) -svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) -svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) -svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) -svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) -svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) -svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) -svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) -svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) -svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) -svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) -svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) -svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) -svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) -svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) -svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) -svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) -svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) -svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) -svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) -svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) -svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) -svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) -svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) -svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) -svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) -svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) -svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) -svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) -svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) -svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) -svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) -svint16_t svqrshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) -svint32_t svqrshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) -svint8_t svqrshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) -svuint16_t svqrshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) -svuint32_t svqrshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) -svuint8_t svqrshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) -svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) -svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) -svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) -svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) -svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) -svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) -svuint16_t svqrshrunb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) -svuint32_t svqrshrunb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) -svuint8_t svqrshrunb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) -svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) -svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) -svuint8_t svqrshrunt(svuint8_t, svint16_t, 
uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) -svint8_t svqshl_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) -svint32_t svqshl_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) -svint64_t svqshl_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) -svint16_t svqshl_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) -svint8_t svqshl_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) -svint32_t svqshl_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) -svint64_t svqshl_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) -svint16_t svqshl_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) -svint8_t svqshl_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) -svint32_t svqshl_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) -svint64_t svqshl_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) -svint16_t svqshl_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) -svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) -svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) -svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) -svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) -svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) -svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) -svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) -svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) -svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) -svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) -svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) -svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) -svint8_t svqshl_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) -svint32_t svqshl_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) -svint64_t svqshl_m(svbool_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) -svint16_t svqshl_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) -svint8_t svqshl_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) -svint32_t svqshl_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) -svint64_t svqshl_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) -svint16_t svqshl_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) -svint8_t svqshl_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) -svint32_t svqshl_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) -svint64_t svqshl_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) -svint16_t svqshl_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) -svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) -svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) -svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) -svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) -svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) -svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) -svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) -svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) -svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) -svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) -svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) -svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) -svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) -svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) -svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) -svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) -svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) -svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) -svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) -svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) -svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) -svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) -svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) -svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) -svint16_t svqshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) -svint32_t svqshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) -svint8_t svqshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) -svuint16_t svqshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) -svuint32_t svqshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) -svuint8_t svqshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) -svint16_t svqshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) -svint32_t svqshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) -svint8_t svqshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) -svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) -svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) -svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) -svuint16_t svqshrunb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) -svuint32_t svqshrunb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) -svuint8_t svqshrunb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) -svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) -svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) -svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) -svint8_t svqsub_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) -svint32_t svqsub_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) -svint64_t svqsub_m(svbool_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) -svint16_t svqsub_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) -svint8_t svqsub_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) -svint32_t svqsub_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) -svint64_t svqsub_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) -svint16_t svqsub_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) -svint8_t svqsub_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) -svint32_t svqsub_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) -svint64_t svqsub_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) -svint16_t svqsub_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) -svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) -svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) -svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) -svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) -svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) -svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) -svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) -svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) -svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) -svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) -svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) -svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) -svint8_t svqsub_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) -svint32_t svqsub_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) -svint64_t svqsub_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) -svint16_t svqsub_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) -svint8_t svqsub_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) -svint32_t svqsub_x(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) -svint64_t svqsub_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) -svint16_t svqsub_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) -svint8_t svqsub_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) -svint32_t svqsub_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) -svint64_t svqsub_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) -svint16_t svqsub_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) -svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) -svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) -svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) -svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) -svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) -svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) -svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) -svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) -svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) -svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) -svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) -svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) -svint8_t svqsubr_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) -svint32_t svqsubr_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) -svint64_t svqsubr_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) -svint16_t svqsubr_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) -svint8_t svqsubr_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) -svint32_t svqsubr_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) -svint64_t svqsubr_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) -svint16_t svqsubr_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) -svint8_t svqsubr_z(svbool_t, svint8_t, 
int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) -svint32_t svqsubr_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) -svint64_t svqsubr_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) -svint16_t svqsubr_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) -svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) -svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) -svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) -svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) -svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) -svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) -svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) -svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) -svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) -svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) -svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) -svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) -svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) -svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) -svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) -svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) -svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) -svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) -svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) -svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) -svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) -svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) -svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) -svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) -svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) -svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) -svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) -svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) -svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) -svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) -svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) -svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) -svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) -svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) -svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) -svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) -svint16_t svqxtnb(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) -svint32_t svqxtnb(svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) -svint8_t svqxtnb(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) -svuint16_t svqxtnb(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) -svuint32_t svqxtnb(svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) -svuint8_t svqxtnb(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) -svint16_t svqxtnt(svint16_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) -svint32_t svqxtnt(svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) -svint8_t svqxtnt(svint8_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) -svuint16_t svqxtnt(svuint16_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) -svuint32_t svqxtnt(svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) -svuint8_t svqxtnt(svuint8_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) -svuint16_t svqxtunb(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) -svuint32_t svqxtunb(svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) -svuint8_t svqxtunb(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) -svuint16_t 
svqxtunt(svuint16_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) -svuint32_t svqxtunt(svuint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) -svuint8_t svqxtunt(svuint8_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) -svuint16_t svraddhnb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) -svuint32_t svraddhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) -svuint8_t svraddhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) -svint16_t svraddhnb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) -svint32_t svraddhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) -svint8_t svraddhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) -svuint16_t svraddhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) -svuint32_t svraddhnb(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) -svuint8_t svraddhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) -svint16_t svraddhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) -svint32_t svraddhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) -svint8_t svraddhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) -svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) -svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) -svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) -svint16_t svraddhnt(svint16_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) -svint32_t svraddhnt(svint32_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) -svint8_t svraddhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) -svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) -svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) -svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) -svint16_t svraddhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) -svint32_t svraddhnt(svint32_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) -svint8_t svraddhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) -svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) -svuint32_t svrecpe_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) -svuint32_t svrecpe_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) -svint8_t svrhadd_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) -svint32_t svrhadd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) -svint64_t svrhadd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) -svint16_t svrhadd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) -svint8_t svrhadd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) -svint32_t svrhadd_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) -svint64_t svrhadd_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) -svint16_t svrhadd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) -svint8_t svrhadd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) -svint32_t svrhadd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) -svint64_t svrhadd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) -svint16_t svrhadd_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) -svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) -svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) -svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) -svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) -svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) -svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) -svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) -svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) -svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) -svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) -svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) -svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) -svint8_t svrhadd_m(svbool_t, 
svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) -svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) -svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) -svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) -svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) -svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) -svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) -svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) -svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) -svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) -svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) -svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) -svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) -svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) -svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) -svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) -svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) -svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) -svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) -svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) -svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) -svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) -svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) -svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) -svint8_t svrshl_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) -svint32_t svrshl_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) -svint64_t svrshl_m(svbool_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) -svint16_t svrshl_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) -svint8_t svrshl_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) -svint32_t svrshl_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) -svint64_t svrshl_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) -svint16_t svrshl_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) -svint8_t svrshl_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) -svint32_t svrshl_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) -svint64_t svrshl_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) -svint16_t svrshl_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) -svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) -svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) -svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) -svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) -svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) -svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) -svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) -svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) -svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) -svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) -svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) -svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) -svint8_t svrshl_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) -svint32_t svrshl_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) -svint64_t svrshl_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) -svint16_t svrshl_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) -svint8_t svrshl_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) -svint32_t svrshl_x(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) -svint64_t svrshl_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) -svint16_t svrshl_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) -svint8_t svrshl_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) -svint32_t svrshl_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) -svint64_t svrshl_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) -svint16_t svrshl_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) -svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) -svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) -svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) -svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) -svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) -svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) -svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) -svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) -svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) -svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) -svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) -svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) -svint8_t svrshr_m(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) -svint32_t svrshr_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) -svint64_t svrshr_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) -svint16_t svrshr_m(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) -svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) -svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) -svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) -svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) -svint8_t svrshr_x(svbool_t, svint8_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) -svint32_t svrshr_x(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) -svint64_t svrshr_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) -svint16_t svrshr_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) -svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) -svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) -svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) -svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) -svint8_t svrshr_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) -svint32_t svrshr_z(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) -svint64_t svrshr_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) -svint16_t svrshr_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) -svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) -svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) -svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) -svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) -svuint16_t svrshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) -svuint32_t svrshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) -svuint8_t svrshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) -svint16_t svrshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) -svint32_t svrshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) -svint8_t svrshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) -svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) -svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) -svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) -svint16_t svrshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) -svint32_t svrshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) -svint8_t svrshrnt(svint8_t, svint16_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) -svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) -svuint32_t svrsqrte_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) -svuint32_t svrsqrte_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) -svint8_t svrsra(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) -svint32_t svrsra(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) -svint64_t svrsra(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) -svint16_t svrsra(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) -svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) -svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) -svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) -svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) -svuint16_t svrsubhnb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) -svuint32_t svrsubhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) -svuint8_t svrsubhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) -svint16_t svrsubhnb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) -svint32_t svrsubhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) -svint8_t svrsubhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) -svuint16_t svrsubhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) -svuint32_t svrsubhnb(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) -svuint8_t svrsubhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) -svint16_t svrsubhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) -svint32_t svrsubhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) -svint8_t svrsubhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) -svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) -svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) -svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) -svint16_t svrsubhnt(svint16_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) -svint32_t 
svrsubhnt(svint32_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) -svint8_t svrsubhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) -svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) -svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) -svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) -svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) -svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) -svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) -svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) -svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) -svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) -svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) -svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) -svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) -svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) -svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) -svint32_t svshllb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) -svint64_t svshllb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) -svint16_t svshllb(svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) -svuint32_t svshllb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) -svuint64_t svshllb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) -svuint16_t svshllb(svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) -svint32_t svshllt(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) -svint64_t svshllt(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) -svint16_t svshllt(svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) -svuint32_t svshllt(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) -svuint64_t svshllt(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) -svuint16_t svshllt(svuint8_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) -svuint16_t svshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) -svuint32_t svshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) -svuint8_t svshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) -svint16_t svshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) -svint32_t svshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) -svint8_t svshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) -svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) -svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) -svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) -svint16_t svshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) -svint32_t svshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) -svint8_t svshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) -svuint8_t svsli(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) -svuint32_t svsli(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) -svuint64_t svsli(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) -svuint16_t svsli(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) -svint8_t svsli(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) -svint32_t svsli(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) -svint64_t svsli(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) -svint16_t svsli(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) -svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) -svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) -svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) -svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) -svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) -svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) -svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) 
-svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) -svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) -svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) -svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) -svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) -svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) -svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) -svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) -svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) -svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) -svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) -svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) -svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) -svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) -svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) -svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) -svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) -svint8_t svsra(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) -svint32_t svsra(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) -svint64_t svsra(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) -svint16_t svsra(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) -svuint8_t svsra(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) -svuint32_t svsra(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) -svuint64_t svsra(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) -svuint16_t svsra(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) -svuint8_t svsri(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) -svuint32_t svsri(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) 
-svuint64_t svsri(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
-svuint16_t svsri(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
-svint8_t svsri(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
-svint32_t svsri(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
-svint64_t svsri(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
-svint16_t svsri(svint16_t, svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
-void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
-void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
-void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
-void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
-void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
-void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
-void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
-void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
-void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
-void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
-void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
-void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
-void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
-void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
-void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
-void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
-void svstnt1_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
-void svstnt1_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
-void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
-void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
-void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
-void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
-void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
-void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
-void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
-void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
-void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
-void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
-void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
-void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
-void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
-void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
-void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
-void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
-void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
-void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
-void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
-void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
-void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
-void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
-void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
-void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
-void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
-void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
-void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
-void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
-void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
-void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
-void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
-void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
-void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
-void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
-void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
-void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
-void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
-void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
-void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
-void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
-void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
-void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
-void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
-void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
-void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
-void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
-void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
-void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
-void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
-void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
-void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
-void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
-void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
-void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
-void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
-void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
-void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
-void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
-void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
-void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
-void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
-void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
-void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
-void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
-void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
-svuint16_t svsubhnb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
-svuint32_t svsubhnb(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
-svuint8_t svsubhnb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
-svint16_t svsubhnb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
-svint32_t svsubhnb(svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
-svint8_t svsubhnb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
-svuint16_t svsubhnb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
-svuint32_t svsubhnb(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
-svuint8_t svsubhnb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
-svint16_t svsubhnb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
-svint32_t svsubhnb(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
-svint8_t svsubhnb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
-svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
-svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
-svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
-svint16_t svsubhnt(svint16_t, svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
-svint32_t svsubhnt(svint32_t, svint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
-svint8_t svsubhnt(svint8_t, svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
-svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
-svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
-svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
-svint16_t svsubhnt(svint16_t, svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
-svint32_t svsubhnt(svint32_t, svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
-svint8_t svsubhnt(svint8_t, svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
-svint32_t svsublb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
-svint64_t svsublb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
-svint16_t svsublb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
-svuint32_t svsublb(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
-svuint64_t svsublb(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
-svuint16_t svsublb(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
-svint32_t svsublb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
-svint64_t svsublb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
-svint16_t svsublb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
-svuint32_t svsublb(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
-svuint64_t svsublb(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
-svuint16_t svsublb(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
-svint32_t svsublbt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
-svint64_t svsublbt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
-svint16_t svsublbt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
-svint32_t svsublbt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
-svint64_t svsublbt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
-svint16_t svsublbt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
-svint32_t svsublt(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
-svint64_t svsublt(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
-svint16_t svsublt(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
-svuint32_t svsublt(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
-svuint64_t svsublt(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
-svuint16_t svsublt(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
-svint32_t svsublt(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
-svint64_t svsublt(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
-svint16_t svsublt(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
-svuint32_t svsublt(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
-svuint64_t svsublt(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
-svuint16_t svsublt(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
-svint32_t svsubltb(svint16_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
-svint64_t svsubltb(svint32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
-svint16_t svsubltb(svint8_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
-svint32_t svsubltb(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
-svint64_t svsubltb(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
-svint16_t svsubltb(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
-svint32_t svsubwb(svint32_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
-svint64_t svsubwb(svint64_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
-svint16_t svsubwb(svint16_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
-svuint32_t svsubwb(svuint32_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
-svuint64_t svsubwb(svuint64_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
-svuint16_t svsubwb(svuint16_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
-svint32_t svsubwb(svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
-svint64_t svsubwb(svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
-svint16_t svsubwb(svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
-svuint32_t svsubwb(svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
-svuint64_t svsubwb(svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
-svuint16_t svsubwb(svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
-svint32_t svsubwt(svint32_t, int16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
-svint64_t svsubwt(svint64_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
-svint16_t svsubwt(svint16_t, int8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
-svuint32_t svsubwt(svuint32_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
-svuint64_t svsubwt(svuint64_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
-svuint16_t svsubwt(svuint16_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
-svint32_t svsubwt(svint32_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
-svint64_t svsubwt(svint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
-svint16_t svsubwt(svint16_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
-svuint32_t svsubwt(svuint32_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
-svuint64_t svsubwt(svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
-svuint16_t svsubwt(svuint16_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
-svuint8_t svtbl2(svuint8x2_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
-svuint32_t svtbl2(svuint32x2_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
-svuint64_t svtbl2(svuint64x2_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
-svuint16_t svtbl2(svuint16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
-svint8_t svtbl2(svint8x2_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
-svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
-svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
-svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
-svint32_t svtbl2(svint32x2_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
-svint64_t svtbl2(svint64x2_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
-svint16_t svtbl2(svint16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
-svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
-svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
-svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
-svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
-svint8_t svtbx(svint8_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
-svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
-svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
-svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
-svint32_t svtbx(svint32_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
-svint64_t svtbx(svint64_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
-svint16_t svtbx(svint16_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
-svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
-svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
-svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
-svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
-svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
-svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
-svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
-svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
-svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
-svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
-svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
-svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
-svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
-svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
-svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
-svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
-svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
-svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
-svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
-svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
-svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
-svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
-svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
-svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
-svbool_t svwhilege_b8(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
-svbool_t svwhilege_b32(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
-svbool_t svwhilege_b64(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
-svbool_t svwhilege_b16(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
-svbool_t svwhilege_b8(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
-svbool_t svwhilege_b32(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
-svbool_t svwhilege_b64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
-svbool_t svwhilege_b16(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
-svbool_t svwhilege_b8(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
-svbool_t svwhilege_b32(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
-svbool_t svwhilege_b64(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
-svbool_t svwhilege_b16(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
-svbool_t svwhilege_b8(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
-svbool_t svwhilege_b32(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
-svbool_t svwhilege_b64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
-svbool_t svwhilege_b16(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
-svbool_t svwhilegt_b8(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
-svbool_t svwhilegt_b32(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
-svbool_t svwhilegt_b64(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
-svbool_t svwhilegt_b16(int32_t, int32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
-svbool_t svwhilegt_b8(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
-svbool_t svwhilegt_b32(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
-svbool_t svwhilegt_b64(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
-svbool_t svwhilegt_b16(int64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
-svbool_t svwhilegt_b8(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
-svbool_t svwhilegt_b32(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
-svbool_t svwhilegt_b64(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
-svbool_t svwhilegt_b16(uint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
-svbool_t svwhilegt_b8(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
-svbool_t svwhilegt_b32(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
-svbool_t svwhilegt_b64(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
-svbool_t svwhilegt_b16(uint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
-svbool_t svwhilerw(uint8_t const *, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
-svbool_t svwhilerw(int8_t const *, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
-svbool_t svwhilerw(uint64_t const *, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
-svbool_t svwhilerw(float64_t const *, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
-svbool_t svwhilerw(int64_t const *, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
-svbool_t svwhilerw(uint16_t const *, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
-svbool_t svwhilerw(float16_t const *, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
-svbool_t svwhilerw(int16_t const *, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
-svbool_t svwhilerw(uint32_t const *, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
-svbool_t svwhilerw(float32_t const *, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
-svbool_t svwhilerw(int32_t const *, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
-svbool_t svwhilewr(uint8_t const *, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
-svbool_t svwhilewr(int8_t const *, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
-svbool_t svwhilewr(uint64_t const *, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
-svbool_t svwhilewr(float64_t const *, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
-svbool_t svwhilewr(int64_t const *, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
-svbool_t svwhilewr(uint16_t const *, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
-svbool_t svwhilewr(float16_t const *, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
-svbool_t svwhilewr(int16_t const *, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
-svbool_t svwhilewr(uint32_t const *, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
-svbool_t svwhilewr(float32_t const *, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
-svbool_t svwhilewr(int32_t const *, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
-svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
-svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
-svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
-svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
-svint8_t svxar(svint8_t, svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
-svint32_t svxar(svint32_t, svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
-svint64_t svxar(svint64_t, svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
-svint16_t svxar(svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
-svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
-svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
-svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
-svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
-svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
-svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
-svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
-svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
-svuint8_t svaesd_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
-svuint8_t svaese_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
-svuint8_t svaesimc_u8(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
-svuint8_t svaesmc_u8(svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
-svuint64_t svpmullb_pair_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
-svuint64_t svpmullb_pair_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
-svuint64_t svpmullt_pair_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
-svuint64_t svpmullt_pair_u64(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
-svuint8_t svaesd(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
-svuint8_t svaese(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
-svuint8_t svaesimc(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
-svuint8_t svaesmc(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
-svuint64_t svpmullb_pair(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
-svuint64_t svpmullb_pair(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
-svuint64_t svpmullt_pair(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
-svuint64_t svpmullt_pair(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
-svuint8_t svbdep_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
-svuint32_t svbdep_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
-svuint64_t svbdep_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
-svuint16_t svbdep_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
-svuint8_t svbdep_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
-svuint32_t svbdep_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
-svuint64_t svbdep_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
-svuint16_t svbdep_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
-svuint8_t svbext_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
-svuint32_t svbext_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
-svuint64_t svbext_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
-svuint16_t svbext_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
-svuint8_t svbext_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
-svuint32_t svbext_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
-svuint64_t svbext_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
-svuint16_t svbext_u16(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
-svuint8_t svbgrp_n_u8(svuint8_t, uint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
-svuint32_t svbgrp_n_u32(svuint32_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
-svuint64_t svbgrp_n_u64(svuint64_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
-svuint16_t svbgrp_n_u16(svuint16_t, uint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
-svuint8_t svbgrp_u8(svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
-svuint32_t svbgrp_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
-svuint64_t svbgrp_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
-svuint16_t svbgrp_u16(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
-svuint8_t svbdep(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
-svuint32_t svbdep(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
-svuint64_t svbdep(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
-svuint16_t svbdep(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
-svuint8_t svbdep(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
-svuint32_t svbdep(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
-svuint64_t svbdep(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
-svuint16_t svbdep(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
-svuint8_t svbext(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
-svuint32_t svbext(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
-svuint64_t svbext(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
-svuint16_t svbext(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
-svuint8_t svbext(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
-svuint32_t svbext(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
-svuint64_t svbext(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
-svuint16_t svbext(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
-svuint8_t svbgrp(svuint8_t, uint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
-svuint32_t svbgrp(svuint32_t, uint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
-svuint64_t svbgrp(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
-svuint16_t svbgrp(svuint16_t, uint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
-svuint8_t svbgrp(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
-svuint32_t svbgrp(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
-svuint64_t svbgrp(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
-svuint16_t svbgrp(svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
-svuint64_t svrax1_u64(svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
-svint64_t svrax1_s64(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
-svuint64_t svrax1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
-svint64_t svrax1(svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
-svuint32_t svsm4e_u32(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
-svuint32_t svsm4ekey_u32(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
-svuint32_t svsm4e(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
-svuint32_t svsm4ekey(svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u8)))
-uint8x16_t svaddqv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u32)))
-uint32x4_t svaddqv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u64)))
-uint64x2_t svaddqv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16)))
-uint16x8_t svaddqv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8)))
-int8x16_t svaddqv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64)))
-float64x2_t svaddqv_f64(svbool_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32)))
-float32x4_t svaddqv_f32(svbool_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16)))
-float16x8_t svaddqv_f16(svbool_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32)))
-int32x4_t svaddqv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64)))
-int64x2_t svaddqv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16)))
-int16x8_t svaddqv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8)))
-uint8x16_t svandqv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32)))
-uint32x4_t svandqv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u64)))
-uint64x2_t svandqv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u16)))
-uint16x8_t svandqv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s8)))
-int8x16_t svandqv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s32)))
-int32x4_t svandqv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s64)))
-int64x2_t svandqv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s16)))
-int16x8_t svandqv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u8)))
-uint8x16_t sveorqv_u8(svbool_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u32)))
-uint32x4_t sveorqv_u32(svbool_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u64)))
-uint64x2_t sveorqv_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u16)))
-uint16x8_t sveorqv_u16(svbool_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s8)))
-int8x16_t sveorqv_s8(svbool_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s32)))
-int32x4_t sveorqv_s32(svbool_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s64)))
-int64x2_t sveorqv_s64(svbool_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s16)))
-int16x8_t sveorqv_s16(svbool_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u8)))
-svuint8_t svextq_u8(svuint8_t, svuint8_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u32)))
-svuint32_t svextq_u32(svuint32_t, svuint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u64)))
-svuint64_t svextq_u64(svuint64_t, svuint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u16)))
-svuint16_t svextq_u16(svuint16_t, svuint16_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_bf16)))
-svbfloat16_t svextq_bf16(svbfloat16_t, svbfloat16_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s8)))
-svint8_t svextq_s8(svint8_t, svint8_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f64)))
-svfloat64_t svextq_f64(svfloat64_t, svfloat64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f32)))
-svfloat32_t svextq_f32(svfloat32_t, svfloat32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f16)))
-svfloat16_t svextq_f16(svfloat16_t, svfloat16_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s32)))
-svint32_t svextq_s32(svint32_t, svint32_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s64)))
-svint64_t svextq_s64(svint64_t, svint64_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s16)))
-svint16_t svextq_s16(svint16_t, svint16_t, int32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u32)))
-svuint32_t svld1q_gather_u64base_index_u32(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u64)))
-svuint64_t svld1q_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u16)))
-svuint16_t svld1q_gather_u64base_index_u16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_bf16)))
-svbfloat16_t svld1q_gather_u64base_index_bf16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f64)))
-svfloat64_t svld1q_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f32)))
-svfloat32_t svld1q_gather_u64base_index_f32(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f16)))
-svfloat16_t svld1q_gather_u64base_index_f16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s32)))
-svint32_t svld1q_gather_u64base_index_s32(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s64)))
-svint64_t svld1q_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s16)))
-svint16_t svld1q_gather_u64base_index_s16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u8)))
-svuint8_t svld1q_gather_u64base_offset_u8(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u32)))
-svuint32_t svld1q_gather_u64base_offset_u32(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u64)))
-svuint64_t svld1q_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u16)))
-svuint16_t svld1q_gather_u64base_offset_u16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_bf16)))
-svbfloat16_t svld1q_gather_u64base_offset_bf16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s8)))
-svint8_t svld1q_gather_u64base_offset_s8(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f64)))
-svfloat64_t svld1q_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f32)))
-svfloat32_t svld1q_gather_u64base_offset_f32(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f16)))
-svfloat16_t svld1q_gather_u64base_offset_f16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s32)))
-svint32_t svld1q_gather_u64base_offset_s32(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s64)))
-svint64_t svld1q_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s16)))
-svint16_t svld1q_gather_u64base_offset_s16(svbool_t, svuint64_t, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u8)))
-svuint8_t svld1q_gather_u64base_u8(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u32)))
-svuint32_t svld1q_gather_u64base_u32(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u64)))
-svuint64_t svld1q_gather_u64base_u64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u16)))
-svuint16_t svld1q_gather_u64base_u16(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_bf16)))
-svbfloat16_t svld1q_gather_u64base_bf16(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s8)))
-svint8_t svld1q_gather_u64base_s8(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f64)))
-svfloat64_t svld1q_gather_u64base_f64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f32)))
-svfloat32_t svld1q_gather_u64base_f32(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f16)))
-svfloat16_t svld1q_gather_u64base_f16(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s32)))
-svint32_t svld1q_gather_u64base_s32(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s64)))
-svint64_t svld1q_gather_u64base_s64(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s16)))
-svint16_t svld1q_gather_u64base_s16(svbool_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u32)))
-svuint32_t svld1q_gather_u64index_u32(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u64)))
-svuint64_t svld1q_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u16)))
-svuint16_t svld1q_gather_u64index_u16(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_bf16)))
-svbfloat16_t svld1q_gather_u64index_bf16(svbool_t, bfloat16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f64)))
-svfloat64_t svld1q_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f32)))
-svfloat32_t svld1q_gather_u64index_f32(svbool_t, float32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f16)))
-svfloat16_t svld1q_gather_u64index_f16(svbool_t, float16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s32)))
-svint32_t svld1q_gather_u64index_s32(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s64)))
-svint64_t svld1q_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s16)))
-svint16_t svld1q_gather_u64index_s16(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u8)))
-svuint8_t svld1q_gather_u64offset_u8(svbool_t, uint8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u32)))
-svuint32_t svld1q_gather_u64offset_u32(svbool_t, uint32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u64)))
-svuint64_t svld1q_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u16)))
-svuint16_t svld1q_gather_u64offset_u16(svbool_t, uint16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_bf16)))
-svbfloat16_t svld1q_gather_u64offset_bf16(svbool_t, bfloat16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s8)))
-svint8_t svld1q_gather_u64offset_s8(svbool_t, int8_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f64)))
-svfloat64_t svld1q_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f32)))
-svfloat32_t svld1q_gather_u64offset_f32(svbool_t, float32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f16)))
-svfloat16_t svld1q_gather_u64offset_f16(svbool_t, float16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s32)))
-svint32_t svld1q_gather_u64offset_s32(svbool_t, int32_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s64)))
-svint64_t svld1q_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s16)))
-svint16_t svld1q_gather_u64offset_s16(svbool_t, int16_t const *, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_u64)))
-svuint64_t svld1udq_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_f64)))
-svfloat64_t svld1udq_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_s64)))
-svint64_t svld1udq_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_u64)))
-svuint64_t svld1udq_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_f64)))
-svfloat64_t svld1udq_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_s64)))
-svint64_t svld1udq_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_u32)))
-svuint32_t svld1uwq_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_f32)))
-svfloat32_t svld1uwq_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_s32)))
-svint32_t svld1uwq_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_u32)))
-svuint32_t svld1uwq_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_f32)))
-svfloat32_t svld1uwq_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_s32)))
-svint32_t svld1uwq_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u8)))
-svuint8x2_t svld2q_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u32)))
-svuint32x2_t svld2q_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u64)))
-svuint64x2_t svld2q_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u16)))
-svuint16x2_t svld2q_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s8)))
-svint8x2_t svld2q_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f64)))
-svfloat64x2_t svld2q_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f32)))
-svfloat32x2_t svld2q_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f16)))
-svfloat16x2_t svld2q_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s32)))
-svint32x2_t svld2q_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s64)))
-svint64x2_t svld2q_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s16)))
-svint16x2_t svld2q_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_bf16)))
-svbfloat16x2_t svld2q_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u8)))
-svuint8x2_t svld2q_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u32)))
-svuint32x2_t svld2q_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u64)))
-svuint64x2_t svld2q_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u16)))
-svuint16x2_t svld2q_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s8)))
-svint8x2_t svld2q_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f64)))
-svfloat64x2_t svld2q_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f32)))
-svfloat32x2_t svld2q_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f16)))
-svfloat16x2_t svld2q_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s32)))
-svint32x2_t svld2q_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s64)))
-svint64x2_t svld2q_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s16)))
-svint16x2_t svld2q_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_bf16)))
-svbfloat16x2_t svld2q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u8)))
-svuint8x3_t svld3q_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u32)))
-svuint32x3_t svld3q_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u64)))
-svuint64x3_t svld3q_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u16)))
-svuint16x3_t svld3q_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s8)))
-svint8x3_t svld3q_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f64)))
-svfloat64x3_t svld3q_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f32)))
-svfloat32x3_t svld3q_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f16)))
-svfloat16x3_t svld3q_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s32)))
-svint32x3_t svld3q_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s64)))
-svint64x3_t svld3q_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s16)))
-svint16x3_t svld3q_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_bf16)))
-svbfloat16x3_t svld3q_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u8)))
-svuint8x3_t svld3q_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u32)))
-svuint32x3_t svld3q_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u64)))
-svuint64x3_t svld3q_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u16)))
-svuint16x3_t svld3q_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s8)))
-svint8x3_t svld3q_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f64)))
-svfloat64x3_t svld3q_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f32)))
-svfloat32x3_t svld3q_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f16)))
-svfloat16x3_t svld3q_vnum_f16(svbool_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s32)))
-svint32x3_t svld3q_vnum_s32(svbool_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s64)))
-svint64x3_t svld3q_vnum_s64(svbool_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s16)))
-svint16x3_t svld3q_vnum_s16(svbool_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_bf16)))
-svbfloat16x3_t svld3q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u8)))
-svuint8x4_t svld4q_u8(svbool_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u32)))
-svuint32x4_t svld4q_u32(svbool_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u64)))
-svuint64x4_t svld4q_u64(svbool_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u16)))
-svuint16x4_t svld4q_u16(svbool_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s8)))
-svint8x4_t svld4q_s8(svbool_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f64)))
-svfloat64x4_t svld4q_f64(svbool_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f32)))
-svfloat32x4_t svld4q_f32(svbool_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f16)))
-svfloat16x4_t svld4q_f16(svbool_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s32)))
-svint32x4_t svld4q_s32(svbool_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s64)))
-svint64x4_t svld4q_s64(svbool_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s16)))
-svint16x4_t svld4q_s16(svbool_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_bf16)))
-svbfloat16x4_t svld4q_bf16(svbool_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u8)))
-svuint8x4_t svld4q_vnum_u8(svbool_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u32)))
-svuint32x4_t svld4q_vnum_u32(svbool_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u64)))
-svuint64x4_t svld4q_vnum_u64(svbool_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u16)))
-svuint16x4_t svld4q_vnum_u16(svbool_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s8)))
-svint8x4_t svld4q_vnum_s8(svbool_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f64)))
-svfloat64x4_t svld4q_vnum_f64(svbool_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f32)))
-svfloat32x4_t svld4q_vnum_f32(svbool_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f16)))
-svfloat16x4_t
svld4q_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s32))) -svint32x4_t svld4q_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s64))) -svint64x4_t svld4q_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s16))) -svint16x4_t svld4q_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_bf16))) -svbfloat16x4_t svld4q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f64))) -float64x2_t svmaxnmqv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f32))) -float32x4_t svmaxnmqv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f16))) -float16x8_t svmaxnmqv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f64))) -float64x2_t svmaxqv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f32))) -float32x4_t svmaxqv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f16))) -float16x8_t svmaxqv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s8))) -int8x16_t svmaxqv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s32))) -int32x4_t svmaxqv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s64))) -int64x2_t svmaxqv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s16))) -int16x8_t svmaxqv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u8))) -uint8x16_t svmaxqv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u32))) -uint32x4_t svmaxqv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u64))) -uint64x2_t svmaxqv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u16))) -uint16x8_t svmaxqv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f64))) -float64x2_t svminnmqv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f32))) -float32x4_t svminnmqv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f16))) -float16x8_t svminnmqv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f64))) -float64x2_t svminqv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f32))) -float32x4_t svminqv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f16))) -float16x8_t svminqv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s8))) -int8x16_t svminqv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s32))) -int32x4_t svminqv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s64))) -int64x2_t svminqv_s64(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s16))) -int16x8_t svminqv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u8))) -uint8x16_t svminqv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u32))) -uint32x4_t svminqv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u64))) -uint64x2_t svminqv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u16))) -uint16x8_t svminqv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u8))) -uint8x16_t svorqv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u32))) -uint32x4_t svorqv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u64))) -uint64x2_t svorqv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u16))) -uint16x8_t svorqv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s8))) -int8x16_t svorqv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s32))) -int32x4_t svorqv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s64))) -int64x2_t svorqv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s16))) -int16x8_t svorqv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8))) -svbool_t svpmov_u8(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8))) -svbool_t svpmov_s8(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64))) -svbool_t svpmov_u64(svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64))) -svbool_t svpmov_s64(svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16))) -svbool_t svpmov_u16(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16))) -svbool_t svpmov_s16(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32))) -svbool_t svpmov_u32(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32))) -svbool_t svpmov_s32(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u8))) -svbool_t svpmov_lane_u8(svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s8))) -svbool_t svpmov_lane_s8(svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64))) -svbool_t svpmov_lane_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64))) -svbool_t svpmov_lane_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16))) -svbool_t svpmov_lane_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16))) -svbool_t svpmov_lane_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32))) -svbool_t svpmov_lane_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32))) -svbool_t svpmov_lane_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64_m))) -svuint64_t 
svpmov_lane_u64_m(svuint64_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64_m))) -svint64_t svpmov_lane_s64_m(svint64_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16_m))) -svuint16_t svpmov_lane_u16_m(svuint16_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16_m))) -svint16_t svpmov_lane_s16_m(svint16_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32_m))) -svuint32_t svpmov_lane_u32_m(svuint32_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32_m))) -svint32_t svpmov_lane_s32_m(svint32_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8_z))) -svuint8_t svpmov_u8_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8_z))) -svint8_t svpmov_s8_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64_z))) -svuint64_t svpmov_u64_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64_z))) -svint64_t svpmov_s64_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16_z))) -svuint16_t svpmov_u16_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16_z))) -svint16_t svpmov_s16_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32_z))) -svuint32_t svpmov_u32_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32_z))) -svint32_t svpmov_s32_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_u64))) -void svst1dq_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_f64))) -void svst1dq_f64(svbool_t, float64_t const *, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_s64))) -void svst1dq_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_u64))) -void svst1dq_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_f64))) -void svst1dq_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_s64))) -void svst1dq_vnum_s64(svbool_t, int64_t const *, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u8))) -void svst1q_scatter_u64base_u8(svbool_t, svuint64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u32))) -void svst1q_scatter_u64base_u32(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u64))) -void svst1q_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u16))) -void svst1q_scatter_u64base_u16(svbool_t, svuint64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_bf16))) -void svst1q_scatter_u64base_bf16(svbool_t, svuint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s8))) -void svst1q_scatter_u64base_s8(svbool_t, svuint64_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f64))) -void svst1q_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f32))) -void svst1q_scatter_u64base_f32(svbool_t, svuint64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f16))) -void svst1q_scatter_u64base_f16(svbool_t, svuint64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s32))) -void svst1q_scatter_u64base_s32(svbool_t, svuint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s64))) -void svst1q_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s16))) -void svst1q_scatter_u64base_s16(svbool_t, svuint64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u32))) -void svst1q_scatter_u64base_index_u32(svbool_t, svuint64_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u64))) -void svst1q_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u16))) -void svst1q_scatter_u64base_index_u16(svbool_t, svuint64_t, int64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_bf16))) -void svst1q_scatter_u64base_index_bf16(svbool_t, svuint64_t, int64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f64))) -void svst1q_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f32))) -void svst1q_scatter_u64base_index_f32(svbool_t, svuint64_t, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f16))) -void svst1q_scatter_u64base_index_f16(svbool_t, svuint64_t, int64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s32))) -void svst1q_scatter_u64base_index_s32(svbool_t, svuint64_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s64))) -void svst1q_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s16))) -void svst1q_scatter_u64base_index_s16(svbool_t, svuint64_t, int64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u8))) -void svst1q_scatter_u64base_offset_u8(svbool_t, svuint64_t, int64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u32))) -void svst1q_scatter_u64base_offset_u32(svbool_t, svuint64_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u64))) -void svst1q_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u16))) -void svst1q_scatter_u64base_offset_u16(svbool_t, svuint64_t, int64_t, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_bf16))) -void svst1q_scatter_u64base_offset_bf16(svbool_t, svuint64_t, int64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s8))) -void svst1q_scatter_u64base_offset_s8(svbool_t, svuint64_t, int64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f64))) -void svst1q_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f32))) -void svst1q_scatter_u64base_offset_f32(svbool_t, svuint64_t, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f16))) -void svst1q_scatter_u64base_offset_f16(svbool_t, svuint64_t, int64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s32))) -void svst1q_scatter_u64base_offset_s32(svbool_t, svuint64_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s64))) -void svst1q_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s16))) -void svst1q_scatter_u64base_offset_s16(svbool_t, svuint64_t, int64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u32))) -void svst1q_scatter_u64index_u32(svbool_t, uint32_t *, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u64))) -void svst1q_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u16))) -void svst1q_scatter_u64index_u16(svbool_t, uint16_t *, svuint64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_bf16))) -void svst1q_scatter_u64index_bf16(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f64))) -void svst1q_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f32))) -void svst1q_scatter_u64index_f32(svbool_t, float32_t *, svuint64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f16))) -void svst1q_scatter_u64index_f16(svbool_t, float16_t *, svuint64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s32))) -void svst1q_scatter_u64index_s32(svbool_t, int32_t *, svuint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s64))) -void svst1q_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s16))) -void svst1q_scatter_u64index_s16(svbool_t, int16_t *, svuint64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u8))) -void svst1q_scatter_u64offset_u8(svbool_t, uint8_t *, svuint64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u32))) -void svst1q_scatter_u64offset_u32(svbool_t, uint32_t *, svuint64_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u64))) -void svst1q_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u16))) -void svst1q_scatter_u64offset_u16(svbool_t, uint16_t *, svuint64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_bf16))) -void svst1q_scatter_u64offset_bf16(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s8))) -void svst1q_scatter_u64offset_s8(svbool_t, int8_t *, svuint64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f64))) -void svst1q_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f32))) -void svst1q_scatter_u64offset_f32(svbool_t, float32_t *, svuint64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f16))) -void svst1q_scatter_u64offset_f16(svbool_t, float16_t *, svuint64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s32))) -void svst1q_scatter_u64offset_s32(svbool_t, int32_t *, svuint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s64))) -void svst1q_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s16))) -void svst1q_scatter_u64offset_s16(svbool_t, int16_t *, svuint64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_u32))) -void svst1wq_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_f32))) -void svst1wq_f32(svbool_t, float32_t const *, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_s32))) -void svst1wq_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_u32))) -void svst1wq_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_f32))) -void svst1wq_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_s32))) -void svst1wq_vnum_s32(svbool_t, int32_t const *, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u8))) -void svst2q_u8(svbool_t, uint8_t const *, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u32))) -void svst2q_u32(svbool_t, uint32_t const *, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u64))) -void svst2q_u64(svbool_t, uint64_t const *, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u16))) -void svst2q_u16(svbool_t, uint16_t const *, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s8))) -void svst2q_s8(svbool_t, int8_t const *, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f64))) -void svst2q_f64(svbool_t, float64_t const *, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f32))) -void svst2q_f32(svbool_t, float32_t const *, svfloat32x2_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f16))) -void svst2q_f16(svbool_t, float16_t const *, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s32))) -void svst2q_s32(svbool_t, int32_t const *, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s64))) -void svst2q_s64(svbool_t, int64_t const *, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s16))) -void svst2q_s16(svbool_t, int16_t const *, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_bf16))) -void svst2q_bf16(svbool_t, bfloat16_t const *, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u8))) -void svst2q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u32))) -void svst2q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u64))) -void svst2q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u16))) -void svst2q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s8))) -void svst2q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f64))) -void svst2q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f32))) -void svst2q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f16))) -void svst2q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s32))) -void svst2q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s64))) -void svst2q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s16))) -void svst2q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_bf16))) -void svst2q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u8))) -void svst3q_u8(svbool_t, uint8_t const *, svuint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u32))) -void svst3q_u32(svbool_t, uint32_t const *, svuint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u64))) -void svst3q_u64(svbool_t, uint64_t const *, svuint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u16))) -void svst3q_u16(svbool_t, uint16_t const *, svuint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s8))) -void svst3q_s8(svbool_t, int8_t const *, svint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f64))) -void svst3q_f64(svbool_t, float64_t const *, svfloat64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f32))) -void svst3q_f32(svbool_t, float32_t const *, svfloat32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f16))) -void 
svst3q_f16(svbool_t, float16_t const *, svfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s32))) -void svst3q_s32(svbool_t, int32_t const *, svint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s64))) -void svst3q_s64(svbool_t, int64_t const *, svint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s16))) -void svst3q_s16(svbool_t, int16_t const *, svint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_bf16))) -void svst3q_bf16(svbool_t, bfloat16_t const *, svbfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u8))) -void svst3q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u32))) -void svst3q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u64))) -void svst3q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u16))) -void svst3q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s8))) -void svst3q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f64))) -void svst3q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f32))) -void svst3q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f16))) -void svst3q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s32))) -void svst3q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s64))) -void svst3q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s16))) -void svst3q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_bf16))) -void svst3q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u8))) -void svst4q_u8(svbool_t, uint8_t const *, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u32))) -void svst4q_u32(svbool_t, uint32_t const *, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u64))) -void svst4q_u64(svbool_t, uint64_t const *, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u16))) -void svst4q_u16(svbool_t, uint16_t const *, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s8))) -void svst4q_s8(svbool_t, int8_t const *, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f64))) -void svst4q_f64(svbool_t, float64_t const *, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f32))) -void svst4q_f32(svbool_t, float32_t const *, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f16))) -void svst4q_f16(svbool_t, float16_t const *, svfloat16x4_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s32))) -void svst4q_s32(svbool_t, int32_t const *, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s64))) -void svst4q_s64(svbool_t, int64_t const *, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s16))) -void svst4q_s16(svbool_t, int16_t const *, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_bf16))) -void svst4q_bf16(svbool_t, bfloat16_t const *, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u8))) -void svst4q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u32))) -void svst4q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u64))) -void svst4q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u16))) -void svst4q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s8))) -void svst4q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f64))) -void svst4q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f32))) -void svst4q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f16))) -void svst4q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s32))) -void svst4q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s64))) -void svst4q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s16))) -void svst4q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_bf16))) -void svst4q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u8))) -svuint8_t svtblq_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u32))) -svuint32_t svtblq_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u64))) -svuint64_t svtblq_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u16))) -svuint16_t svtblq_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_bf16))) -svbfloat16_t svtblq_bf16(svbfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s8))) -svint8_t svtblq_s8(svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f64))) -svfloat64_t svtblq_f64(svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f32))) -svfloat32_t svtblq_f32(svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f16))) -svfloat16_t svtblq_f16(svfloat16_t, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s32))) -svint32_t svtblq_s32(svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s64))) -svint64_t svtblq_s64(svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s16))) -svint16_t svtblq_s16(svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u8))) -svuint8_t svtbxq_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u32))) -svuint32_t svtbxq_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u64))) -svuint64_t svtbxq_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u16))) -svuint16_t svtbxq_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_bf16))) -svbfloat16_t svtbxq_bf16(svbfloat16_t, svbfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s8))) -svint8_t svtbxq_s8(svint8_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f64))) -svfloat64_t svtbxq_f64(svfloat64_t, svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f32))) -svfloat32_t svtbxq_f32(svfloat32_t, svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f16))) -svfloat16_t svtbxq_f16(svfloat16_t, svfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s32))) -svint32_t svtbxq_s32(svint32_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s64))) -svint64_t svtbxq_s64(svint64_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s16))) -svint16_t svtbxq_s16(svint16_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u8))) -svuint8_t svuzpq1_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u32))) -svuint32_t svuzpq1_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u64))) -svuint64_t svuzpq1_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u16))) -svuint16_t svuzpq1_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_bf16))) -svbfloat16_t svuzpq1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s8))) -svint8_t svuzpq1_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f64))) -svfloat64_t svuzpq1_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f32))) -svfloat32_t svuzpq1_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f16))) -svfloat16_t svuzpq1_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s32))) -svint32_t svuzpq1_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s64))) -svint64_t svuzpq1_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s16))) -svint16_t svuzpq1_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u8))) -svuint8_t 
svuzpq2_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u32))) -svuint32_t svuzpq2_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u64))) -svuint64_t svuzpq2_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u16))) -svuint16_t svuzpq2_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_bf16))) -svbfloat16_t svuzpq2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s8))) -svint8_t svuzpq2_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f64))) -svfloat64_t svuzpq2_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f32))) -svfloat32_t svuzpq2_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f16))) -svfloat16_t svuzpq2_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s32))) -svint32_t svuzpq2_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s64))) -svint64_t svuzpq2_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s16))) -svint16_t svuzpq2_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u8))) -svuint8_t svzipq1_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u32))) -svuint32_t svzipq1_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u64))) -svuint64_t svzipq1_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u16))) -svuint16_t svzipq1_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_bf16))) -svbfloat16_t svzipq1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s8))) -svint8_t svzipq1_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f64))) -svfloat64_t svzipq1_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f32))) -svfloat32_t svzipq1_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f16))) -svfloat16_t svzipq1_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s32))) -svint32_t svzipq1_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s64))) -svint64_t svzipq1_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s16))) -svint16_t svzipq1_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u8))) -svuint8_t svzipq2_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u32))) -svuint32_t svzipq2_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u64))) -svuint64_t svzipq2_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u16))) -svuint16_t svzipq2_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_bf16))) -svbfloat16_t svzipq2_bf16(svbfloat16_t, svbfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s8))) -svint8_t svzipq2_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f64))) -svfloat64_t svzipq2_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f32))) -svfloat32_t svzipq2_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f16))) -svfloat16_t svzipq2_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s32))) -svint32_t svzipq2_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64))) -svint64_t svzipq2_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16))) -svint16_t svzipq2_s16(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u8))) -uint8x16_t svaddqv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u32))) -uint32x4_t svaddqv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u64))) -uint64x2_t svaddqv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) -uint16x8_t svaddqv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) -int8x16_t svaddqv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) -float64x2_t svaddqv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) -float32x4_t svaddqv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) -float16x8_t svaddqv(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) -int32x4_t svaddqv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) -int64x2_t svaddqv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) -int16x8_t svaddqv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) -uint8x16_t svandqv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) -uint32x4_t svandqv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u64))) -uint64x2_t svandqv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u16))) -uint16x8_t svandqv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s8))) -int8x16_t svandqv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s32))) -int32x4_t svandqv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s64))) -int64x2_t svandqv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s16))) -int16x8_t svandqv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u8))) -uint8x16_t sveorqv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u32))) -uint32x4_t sveorqv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u64))) -uint64x2_t sveorqv(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u16))) -uint16x8_t sveorqv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s8))) -int8x16_t sveorqv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s32))) -int32x4_t sveorqv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s64))) -int64x2_t sveorqv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s16))) -int16x8_t sveorqv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u8))) -svuint8_t svextq(svuint8_t, svuint8_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u32))) -svuint32_t svextq(svuint32_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u64))) -svuint64_t svextq(svuint64_t, svuint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u16))) -svuint16_t svextq(svuint16_t, svuint16_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_bf16))) -svbfloat16_t svextq(svbfloat16_t, svbfloat16_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s8))) -svint8_t svextq(svint8_t, svint8_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f64))) -svfloat64_t svextq(svfloat64_t, svfloat64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f32))) -svfloat32_t svextq(svfloat32_t, svfloat32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f16))) -svfloat16_t svextq(svfloat16_t, svfloat16_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s32))) -svint32_t svextq(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s64))) -svint64_t svextq(svint64_t, svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s16))) -svint16_t svextq(svint16_t, svint16_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u32))) -svuint32_t svld1q_gather_index_u32(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u64))) -svuint64_t svld1q_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u16))) -svuint16_t svld1q_gather_index_u16(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_bf16))) -svbfloat16_t svld1q_gather_index_bf16(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f64))) -svfloat64_t svld1q_gather_index_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f32))) -svfloat32_t svld1q_gather_index_f32(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f16))) -svfloat16_t svld1q_gather_index_f16(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s32))) -svint32_t svld1q_gather_index_s32(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s64))) 
-svint64_t svld1q_gather_index_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s16)))
-svint16_t svld1q_gather_index_s16(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u8)))
-svuint8_t svld1q_gather_offset_u8(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u32)))
-svuint32_t svld1q_gather_offset_u32(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u64)))
-svuint64_t svld1q_gather_offset_u64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u16)))
-svuint16_t svld1q_gather_offset_u16(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_bf16)))
-svbfloat16_t svld1q_gather_offset_bf16(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s8)))
-svint8_t svld1q_gather_offset_s8(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f64)))
-svfloat64_t svld1q_gather_offset_f64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f32)))
-svfloat32_t svld1q_gather_offset_f32(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f16)))
-svfloat16_t svld1q_gather_offset_f16(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s32)))
-svint32_t svld1q_gather_offset_s32(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s64)))
-svint64_t svld1q_gather_offset_s64(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s16)))
-svint16_t svld1q_gather_offset_s16(svbool_t, svuint64_t, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u8)))
-svuint8_t svld1q_gather_u8(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u32)))
-svuint32_t svld1q_gather_u32(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u64)))
-svuint64_t svld1q_gather_u64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u16)))
-svuint16_t svld1q_gather_u16(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_bf16)))
-svbfloat16_t svld1q_gather_bf16(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s8)))
-svint8_t svld1q_gather_s8(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f64)))
-svfloat64_t svld1q_gather_f64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f32)))
-svfloat32_t svld1q_gather_f32(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f16)))
-svfloat16_t svld1q_gather_f16(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s32)))
-svint32_t svld1q_gather_s32(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s64)))
-svint64_t svld1q_gather_s64(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s16)))
-svint16_t svld1q_gather_s16(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u32)))
-svuint32_t svld1q_gather_index(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u64)))
-svuint64_t svld1q_gather_index(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u16)))
-svuint16_t svld1q_gather_index(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_bf16)))
-svbfloat16_t svld1q_gather_index(svbool_t, bfloat16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f64)))
-svfloat64_t svld1q_gather_index(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f32)))
-svfloat32_t svld1q_gather_index(svbool_t, float32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f16)))
-svfloat16_t svld1q_gather_index(svbool_t, float16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s32)))
-svint32_t svld1q_gather_index(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s64)))
-svint64_t svld1q_gather_index(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s16)))
-svint16_t svld1q_gather_index(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u8)))
-svuint8_t svld1q_gather_offset(svbool_t, uint8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u32)))
-svuint32_t svld1q_gather_offset(svbool_t, uint32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u64)))
-svuint64_t svld1q_gather_offset(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u16)))
-svuint16_t svld1q_gather_offset(svbool_t, uint16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_bf16)))
-svbfloat16_t svld1q_gather_offset(svbool_t, bfloat16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s8)))
-svint8_t svld1q_gather_offset(svbool_t, int8_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f64)))
-svfloat64_t svld1q_gather_offset(svbool_t, float64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f32)))
-svfloat32_t svld1q_gather_offset(svbool_t, float32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f16)))
-svfloat16_t svld1q_gather_offset(svbool_t, float16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s32)))
-svint32_t svld1q_gather_offset(svbool_t, int32_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s64)))
-svint64_t svld1q_gather_offset(svbool_t, int64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s16)))
-svint16_t svld1q_gather_offset(svbool_t, int16_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_u64)))
-svuint64_t svld1udq(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_f64)))
-svfloat64_t svld1udq(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_s64)))
-svint64_t svld1udq(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_u64)))
-svuint64_t svld1udq_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_f64)))
-svfloat64_t svld1udq_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_s64)))
-svint64_t svld1udq_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_u32)))
-svuint32_t svld1uwq(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_f32)))
-svfloat32_t svld1uwq(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_s32)))
-svint32_t svld1uwq(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_u32)))
-svuint32_t svld1uwq_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_f32)))
-svfloat32_t svld1uwq_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_s32)))
-svint32_t svld1uwq_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u8)))
-svuint8x2_t svld2q(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u32)))
-svuint32x2_t svld2q(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u64)))
-svuint64x2_t svld2q(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u16)))
-svuint16x2_t svld2q(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s8)))
-svint8x2_t svld2q(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f64)))
-svfloat64x2_t svld2q(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f32)))
-svfloat32x2_t svld2q(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f16)))
-svfloat16x2_t svld2q(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s32)))
-svint32x2_t svld2q(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s64)))
-svint64x2_t svld2q(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s16)))
-svint16x2_t svld2q(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_bf16)))
-svbfloat16x2_t svld2q(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u8)))
-svuint8x2_t svld2q_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u32)))
-svuint32x2_t svld2q_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u64)))
-svuint64x2_t svld2q_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u16)))
-svuint16x2_t svld2q_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s8)))
-svint8x2_t svld2q_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f64)))
-svfloat64x2_t svld2q_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f32)))
-svfloat32x2_t svld2q_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f16)))
-svfloat16x2_t svld2q_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s32)))
-svint32x2_t svld2q_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s64)))
-svint64x2_t svld2q_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s16)))
-svint16x2_t svld2q_vnum(svbool_t, int16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_bf16)))
-svbfloat16x2_t svld2q_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u8)))
-svuint8x3_t svld3q(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u32)))
-svuint32x3_t svld3q(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u64)))
-svuint64x3_t svld3q(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u16)))
-svuint16x3_t svld3q(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s8)))
-svint8x3_t svld3q(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f64)))
-svfloat64x3_t svld3q(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f32)))
-svfloat32x3_t svld3q(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f16)))
-svfloat16x3_t svld3q(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s32)))
-svint32x3_t svld3q(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s64)))
-svint64x3_t svld3q(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s16)))
-svint16x3_t svld3q(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_bf16)))
-svbfloat16x3_t svld3q(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u8)))
-svuint8x3_t svld3q_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u32)))
-svuint32x3_t svld3q_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u64)))
-svuint64x3_t svld3q_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u16)))
-svuint16x3_t svld3q_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s8)))
-svint8x3_t svld3q_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f64)))
-svfloat64x3_t svld3q_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f32)))
-svfloat32x3_t svld3q_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f16)))
-svfloat16x3_t svld3q_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s32)))
-svint32x3_t svld3q_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s64)))
-svint64x3_t svld3q_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s16)))
-svint16x3_t svld3q_vnum(svbool_t, int16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_bf16)))
-svbfloat16x3_t svld3q_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u8)))
-svuint8x4_t svld4q(svbool_t, uint8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u32)))
-svuint32x4_t svld4q(svbool_t, uint32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u64)))
-svuint64x4_t svld4q(svbool_t, uint64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u16)))
-svuint16x4_t svld4q(svbool_t, uint16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s8)))
-svint8x4_t svld4q(svbool_t, int8_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f64)))
-svfloat64x4_t svld4q(svbool_t, float64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f32)))
-svfloat32x4_t svld4q(svbool_t, float32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f16)))
-svfloat16x4_t svld4q(svbool_t, float16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s32)))
-svint32x4_t svld4q(svbool_t, int32_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s64)))
-svint64x4_t svld4q(svbool_t, int64_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s16)))
-svint16x4_t svld4q(svbool_t, int16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_bf16)))
-svbfloat16x4_t svld4q(svbool_t, bfloat16_t const *);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u8)))
-svuint8x4_t svld4q_vnum(svbool_t, uint8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u32)))
-svuint32x4_t svld4q_vnum(svbool_t, uint32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u64)))
-svuint64x4_t svld4q_vnum(svbool_t, uint64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u16)))
-svuint16x4_t svld4q_vnum(svbool_t, uint16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s8)))
-svint8x4_t svld4q_vnum(svbool_t, int8_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f64)))
-svfloat64x4_t svld4q_vnum(svbool_t, float64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f32)))
-svfloat32x4_t svld4q_vnum(svbool_t, float32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f16)))
-svfloat16x4_t svld4q_vnum(svbool_t, float16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s32)))
-svint32x4_t svld4q_vnum(svbool_t, int32_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s64)))
-svint64x4_t svld4q_vnum(svbool_t, int64_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s16)))
-svint16x4_t svld4q_vnum(svbool_t, int16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_bf16)))
-svbfloat16x4_t svld4q_vnum(svbool_t, bfloat16_t const *, int64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f64)))
-float64x2_t svmaxnmqv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f32)))
-float32x4_t svmaxnmqv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f16)))
-float16x8_t svmaxnmqv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f64)))
-float64x2_t svmaxqv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f32)))
-float32x4_t svmaxqv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f16)))
-float16x8_t svmaxqv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s8)))
-int8x16_t svmaxqv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s32)))
-int32x4_t svmaxqv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s64)))
-int64x2_t svmaxqv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s16)))
-int16x8_t svmaxqv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u8)))
-uint8x16_t svmaxqv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u32)))
-uint32x4_t svmaxqv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u64)))
-uint64x2_t svmaxqv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u16)))
-uint16x8_t svmaxqv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f64)))
-float64x2_t svminnmqv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f32)))
-float32x4_t svminnmqv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f16)))
-float16x8_t svminnmqv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f64)))
-float64x2_t svminqv(svbool_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f32)))
-float32x4_t svminqv(svbool_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f16)))
-float16x8_t svminqv(svbool_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s8)))
-int8x16_t svminqv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s32)))
-int32x4_t svminqv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s64)))
-int64x2_t svminqv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s16)))
-int16x8_t svminqv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u8)))
-uint8x16_t svminqv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u32)))
-uint32x4_t svminqv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u64)))
-uint64x2_t svminqv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u16)))
-uint16x8_t svminqv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u8)))
-uint8x16_t svorqv(svbool_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u32)))
-uint32x4_t svorqv(svbool_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u64)))
-uint64x2_t svorqv(svbool_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u16)))
-uint16x8_t svorqv(svbool_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s8)))
-int8x16_t svorqv(svbool_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s32)))
-int32x4_t svorqv(svbool_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s64)))
-int64x2_t svorqv(svbool_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s16)))
-int16x8_t svorqv(svbool_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8)))
-svbool_t svpmov(svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8)))
-svbool_t svpmov(svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64)))
-svbool_t svpmov(svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64)))
-svbool_t svpmov(svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16)))
-svbool_t svpmov(svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16)))
-svbool_t svpmov(svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32)))
-svbool_t svpmov(svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32)))
-svbool_t svpmov(svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u8)))
-svbool_t svpmov_lane(svuint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s8)))
-svbool_t svpmov_lane(svint8_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64)))
-svbool_t svpmov_lane(svuint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64)))
-svbool_t svpmov_lane(svint64_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16)))
-svbool_t svpmov_lane(svuint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16)))
-svbool_t svpmov_lane(svint16_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32)))
-svbool_t svpmov_lane(svuint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32)))
-svbool_t svpmov_lane(svint32_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64_m)))
-svuint64_t svpmov_lane_m(svuint64_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64_m)))
-svint64_t svpmov_lane_m(svint64_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16_m)))
-svuint16_t svpmov_lane_m(svuint16_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16_m)))
-svint16_t svpmov_lane_m(svint16_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32_m)))
-svuint32_t svpmov_lane_m(svuint32_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32_m)))
-svint32_t svpmov_lane_m(svint32_t, svbool_t, uint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_u64)))
-void svst1dq(svbool_t, uint64_t const *, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_f64)))
-void svst1dq(svbool_t, float64_t const *, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_s64)))
-void svst1dq(svbool_t, int64_t const *, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_u64)))
-void svst1dq_vnum(svbool_t, uint64_t const *, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_f64)))
-void svst1dq_vnum(svbool_t, float64_t const *, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_s64)))
-void svst1dq_vnum(svbool_t, int64_t const *, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u8)))
-void svst1q_scatter(svbool_t, svuint64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u32)))
-void svst1q_scatter(svbool_t, svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u64)))
-void svst1q_scatter(svbool_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u16)))
-void svst1q_scatter(svbool_t, svuint64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_bf16)))
-void svst1q_scatter(svbool_t, svuint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s8)))
-void svst1q_scatter(svbool_t, svuint64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f64)))
-void svst1q_scatter(svbool_t, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f32)))
-void svst1q_scatter(svbool_t, svuint64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f16)))
-void svst1q_scatter(svbool_t, svuint64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s32)))
-void svst1q_scatter(svbool_t, svuint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s64)))
-void svst1q_scatter(svbool_t, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s16)))
-void svst1q_scatter(svbool_t, svuint64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u32)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u64)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u16)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_bf16)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f64)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f32)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f16)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s32)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s64)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s16)))
-void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u8)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u32)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u64)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u16)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_bf16)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s8)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f64)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f32)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f16)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s32)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s64)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s16)))
-void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u32)))
-void svst1q_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u64)))
-void svst1q_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u16)))
-void svst1q_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_bf16)))
-void svst1q_scatter_index(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f64)))
-void svst1q_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f32)))
-void svst1q_scatter_index(svbool_t, float32_t *, svuint64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f16)))
-void svst1q_scatter_index(svbool_t, float16_t *, svuint64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s32)))
-void svst1q_scatter_index(svbool_t, int32_t *, svuint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s64)))
-void svst1q_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s16)))
-void svst1q_scatter_index(svbool_t, int16_t *, svuint64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u8)))
-void svst1q_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u32)))
-void svst1q_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u64)))
-void svst1q_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u16)))
-void svst1q_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_bf16)))
-void svst1q_scatter_offset(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s8)))
-void svst1q_scatter_offset(svbool_t, int8_t *, svuint64_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f64)))
-void svst1q_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f32)))
-void svst1q_scatter_offset(svbool_t, float32_t *, svuint64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f16)))
-void svst1q_scatter_offset(svbool_t, float16_t *, svuint64_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s32)))
-void svst1q_scatter_offset(svbool_t, int32_t *, svuint64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s64)))
-void svst1q_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s16)))
-void svst1q_scatter_offset(svbool_t, int16_t *, svuint64_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_u32)))
-void svst1wq(svbool_t, uint32_t const *, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_f32)))
-void svst1wq(svbool_t, float32_t const *, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_s32)))
-void svst1wq(svbool_t, int32_t const *, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_u32)))
-void svst1wq_vnum(svbool_t, uint32_t const *, int64_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_f32)))
-void svst1wq_vnum(svbool_t, float32_t const *, int64_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_s32)))
-void svst1wq_vnum(svbool_t, int32_t const *, int64_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u8)))
-void svst2q(svbool_t, uint8_t const *, svuint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u32)))
-void svst2q(svbool_t, uint32_t const *, svuint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u64)))
-void svst2q(svbool_t, uint64_t const *, svuint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u16)))
-void svst2q(svbool_t, uint16_t const *, svuint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s8)))
-void svst2q(svbool_t, int8_t const *, svint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f64)))
-void svst2q(svbool_t, float64_t const *, svfloat64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f32)))
-void svst2q(svbool_t, float32_t const *, svfloat32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f16)))
-void svst2q(svbool_t, float16_t const *, svfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s32)))
-void svst2q(svbool_t, int32_t const *, svint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s64)))
-void svst2q(svbool_t, int64_t const *, svint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s16)))
-void svst2q(svbool_t, int16_t const *, svint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_bf16)))
-void svst2q(svbool_t, bfloat16_t const *, svbfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u8)))
-void svst2q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u32)))
-void svst2q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u64)))
-void svst2q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u16)))
-void svst2q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s8)))
-void svst2q_vnum(svbool_t, int8_t const *, int64_t, svint8x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f64)))
-void svst2q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f32)))
-void svst2q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f16)))
-void svst2q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s32)))
-void svst2q_vnum(svbool_t, int32_t const *, int64_t, svint32x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s64)))
-void svst2q_vnum(svbool_t, int64_t const *, int64_t, svint64x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s16)))
-void svst2q_vnum(svbool_t, int16_t const *, int64_t, svint16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_bf16)))
-void svst2q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x2_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u8)))
-void svst3q(svbool_t, uint8_t const *, svuint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u32)))
-void svst3q(svbool_t, uint32_t const *, svuint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u64)))
-void svst3q(svbool_t, uint64_t const *, svuint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u16)))
-void svst3q(svbool_t, uint16_t const *, svuint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s8)))
-void svst3q(svbool_t, int8_t const *, svint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f64)))
-void svst3q(svbool_t, float64_t const *, svfloat64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f32)))
-void svst3q(svbool_t, float32_t const *, svfloat32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f16)))
-void svst3q(svbool_t, float16_t const *, svfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s32)))
-void svst3q(svbool_t, int32_t const *, svint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s64)))
-void svst3q(svbool_t, int64_t const *, svint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s16)))
-void svst3q(svbool_t, int16_t const *, svint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_bf16)))
-void svst3q(svbool_t, bfloat16_t const *, svbfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u8)))
-void svst3q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u32)))
-void svst3q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u64)))
-void svst3q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u16)))
-void svst3q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s8)))
-void svst3q_vnum(svbool_t, int8_t const *, int64_t, svint8x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f64)))
-void svst3q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f32)))
-void svst3q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f16)))
-void svst3q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s32)))
-void svst3q_vnum(svbool_t, int32_t const *, int64_t, svint32x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s64)))
-void svst3q_vnum(svbool_t, int64_t const *, int64_t, svint64x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s16)))
-void svst3q_vnum(svbool_t, int16_t const *, int64_t, svint16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_bf16)))
-void svst3q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x3_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u8)))
-void svst4q(svbool_t, uint8_t const *, svuint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u32)))
-void svst4q(svbool_t, uint32_t const *, svuint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u64)))
-void svst4q(svbool_t, uint64_t const *, svuint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u16)))
-void svst4q(svbool_t, uint16_t const *, svuint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s8)))
-void svst4q(svbool_t, int8_t const *, svint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f64)))
-void svst4q(svbool_t, float64_t const *, svfloat64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f32)))
-void svst4q(svbool_t, float32_t const *, svfloat32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f16)))
-void svst4q(svbool_t, float16_t const *, svfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s32)))
-void svst4q(svbool_t, int32_t const *, svint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s64)))
-void svst4q(svbool_t, int64_t const *, svint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s16)))
-void svst4q(svbool_t, int16_t const *, svint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_bf16)))
-void svst4q(svbool_t, bfloat16_t const *, svbfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u8)))
-void svst4q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u32)))
-void svst4q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u64)))
-void svst4q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u16)))
-void svst4q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s8)))
-void svst4q_vnum(svbool_t, int8_t const *, int64_t, svint8x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f64)))
-void svst4q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f32)))
-void svst4q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f16)))
-void svst4q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s32)))
-void svst4q_vnum(svbool_t, int32_t const *, int64_t, svint32x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s64)))
-void svst4q_vnum(svbool_t, int64_t const *, int64_t, svint64x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s16)))
-void svst4q_vnum(svbool_t, int16_t const *, int64_t, svint16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_bf16)))
-void svst4q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x4_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u8)))
-svuint8_t svtblq(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u32)))
-svuint32_t svtblq(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u64)))
-svuint64_t svtblq(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u16)))
-svuint16_t svtblq(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_bf16)))
-svbfloat16_t svtblq(svbfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s8)))
-svint8_t svtblq(svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f64)))
-svfloat64_t svtblq(svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f32)))
-svfloat32_t svtblq(svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f16)))
-svfloat16_t svtblq(svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s32)))
-svint32_t svtblq(svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s64)))
-svint64_t svtblq(svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s16)))
-svint16_t svtblq(svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u8)))
-svuint8_t svtbxq(svuint8_t, svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u32)))
-svuint32_t svtbxq(svuint32_t, svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u64)))
-svuint64_t svtbxq(svuint64_t, svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u16)))
-svuint16_t svtbxq(svuint16_t, svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_bf16)))
-svbfloat16_t svtbxq(svbfloat16_t, svbfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s8)))
-svint8_t svtbxq(svint8_t, svint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f64)))
-svfloat64_t svtbxq(svfloat64_t, svfloat64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f32)))
-svfloat32_t svtbxq(svfloat32_t, svfloat32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f16)))
-svfloat16_t svtbxq(svfloat16_t, svfloat16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s32)))
-svint32_t svtbxq(svint32_t, svint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s64)))
-svint64_t svtbxq(svint64_t, svint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s16)))
-svint16_t svtbxq(svint16_t, svint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u8)))
-svuint8_t svuzpq1(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u32)))
-svuint32_t svuzpq1(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u64)))
-svuint64_t svuzpq1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u16)))
-svuint16_t svuzpq1(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_bf16)))
-svbfloat16_t svuzpq1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s8)))
-svint8_t svuzpq1(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f64)))
-svfloat64_t svuzpq1(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f32)))
-svfloat32_t svuzpq1(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f16)))
-svfloat16_t svuzpq1(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s32)))
-svint32_t svuzpq1(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s64)))
-svint64_t svuzpq1(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s16)))
-svint16_t svuzpq1(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u8)))
-svuint8_t svuzpq2(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u32)))
-svuint32_t svuzpq2(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u64)))
-svuint64_t svuzpq2(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u16)))
-svuint16_t svuzpq2(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_bf16)))
-svbfloat16_t svuzpq2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s8)))
-svint8_t svuzpq2(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f64)))
-svfloat64_t svuzpq2(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f32)))
-svfloat32_t svuzpq2(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f16)))
-svfloat16_t svuzpq2(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s32)))
-svint32_t svuzpq2(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s64)))
-svint64_t svuzpq2(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s16)))
-svint16_t svuzpq2(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u8)))
-svuint8_t svzipq1(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u32)))
-svuint32_t svzipq1(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u64)))
-svuint64_t svzipq1(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u16)))
-svuint16_t svzipq1(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_bf16)))
-svbfloat16_t svzipq1(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s8)))
-svint8_t svzipq1(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f64)))
-svfloat64_t svzipq1(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f32)))
-svfloat32_t svzipq1(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f16)))
-svfloat16_t svzipq1(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s32)))
-svint32_t svzipq1(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s64)))
-svint64_t svzipq1(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s16)))
-svint16_t svzipq1(svint16_t, svint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u8)))
-svuint8_t svzipq2(svuint8_t, svuint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u32)))
-svuint32_t svzipq2(svuint32_t, svuint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u64)))
-svuint64_t svzipq2(svuint64_t, svuint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u16)))
-svuint16_t svzipq2(svuint16_t, svuint16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_bf16)))
-svbfloat16_t svzipq2(svbfloat16_t, svbfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s8)))
-svint8_t svzipq2(svint8_t, svint8_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f64)))
-svfloat64_t svzipq2(svfloat64_t, svfloat64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f32)))
-svfloat32_t svzipq2(svfloat32_t, svfloat32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f16)))
-svfloat16_t svzipq2(svfloat16_t, svfloat16_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s32)))
-svint32_t svzipq2(svint32_t, svint32_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64)))
-svint64_t svzipq2(svint64_t, svint64_t);
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16)))
-svint16_t svzipq2(svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b16)))
-svbool_t svpsel_lane_b16(svbool_t, svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b32)))
-svbool_t svpsel_lane_b32(svbool_t, svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b64)))
-svbool_t svpsel_lane_b64(svbool_t, svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b8)))
-svbool_t svpsel_lane_b8(svbool_t, svbool_t, uint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32)))
-svfloat32_t svbfmlslb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32)))
-svfloat32_t svbfmlslb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_f32)))
-svfloat32_t svbfmlslt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_lane_f32)))
-svfloat32_t svbfmlslt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64)))
-svfloat64_t svclamp_f64(svfloat64_t, svfloat64_t, svfloat64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32)))
-svfloat32_t svclamp_f32(svfloat32_t, svfloat32_t, svfloat32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16)))
-svfloat16_t svclamp_f16(svfloat16_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8)))
-svint8_t svclamp_s8(svint8_t, svint8_t, svint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32)))
-svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64)))
-svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16)))
-svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8)))
-svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32)))
-svuint32_t svclamp_u32(svuint32_t, svuint32_t, svuint32_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64)))
-svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16)))
-svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c8)))
-uint64_t svcntp_c8(svcount_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c32)))
-uint64_t svcntp_c32(svcount_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c64)))
-uint64_t svcntp_c64(svcount_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c16)))
-uint64_t svcntp_c16(svcount_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b)))
-svboolx2_t svcreate2_b(svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b)))
-svboolx4_t svcreate4_b(svbool_t, svbool_t, svbool_t, svbool_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_f32_f16)))
-svfloat32_t svdot_f32_f16(svfloat32_t, svfloat16_t, svfloat16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32_s16)))
-svint32_t svdot_s32_s16(svint32_t, svint16_t, svint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32_u16)))
-svuint32_t svdot_u32_u16(svuint32_t, svuint16_t, svuint16_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_f32_f16)))
-svfloat32_t svdot_lane_f32_f16(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32_s16)))
-svint32_t svdot_lane_s32_s16(svint32_t, svint16_t, svint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32_u16)))
-svuint32_t svdot_lane_u32_u16(svuint32_t, svuint16_t, svuint16_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_b)))
-svbool_t svget2_b(svboolx2_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_b)))
-svbool_t svget4_b(svboolx4_t, uint64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x2)))
-svuint8x2_t svld1_u8_x2(svcount_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x2)))
-svint8x2_t svld1_s8_x2(svcount_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x2)))
-svuint64x2_t svld1_u64_x2(svcount_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x2)))
-svfloat64x2_t svld1_f64_x2(svcount_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x2)))
-svint64x2_t svld1_s64_x2(svcount_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x2)))
-svuint16x2_t svld1_u16_x2(svcount_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x2)))
-svbfloat16x2_t svld1_bf16_x2(svcount_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x2)))
-svfloat16x2_t svld1_f16_x2(svcount_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x2)))
-svint16x2_t svld1_s16_x2(svcount_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x2)))
-svuint32x2_t svld1_u32_x2(svcount_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x2)))
-svfloat32x2_t svld1_f32_x2(svcount_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x2)))
-svint32x2_t svld1_s32_x2(svcount_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x4)))
-svuint8x4_t svld1_u8_x4(svcount_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x4)))
-svint8x4_t svld1_s8_x4(svcount_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x4)))
-svuint64x4_t svld1_u64_x4(svcount_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x4)))
-svfloat64x4_t svld1_f64_x4(svcount_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x4)))
-svint64x4_t svld1_s64_x4(svcount_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x4)))
-svuint16x4_t svld1_u16_x4(svcount_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x4)))
-svbfloat16x4_t svld1_bf16_x4(svcount_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x4)))
-svfloat16x4_t svld1_f16_x4(svcount_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x4)))
-svint16x4_t svld1_s16_x4(svcount_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x4)))
-svuint32x4_t svld1_u32_x4(svcount_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x4)))
-svfloat32x4_t svld1_f32_x4(svcount_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x4)))
-svint32x4_t svld1_s32_x4(svcount_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x2)))
-svuint8x2_t svld1_vnum_u8_x2(svcount_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x2)))
-svint8x2_t svld1_vnum_s8_x2(svcount_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x2)))
-svuint64x2_t svld1_vnum_u64_x2(svcount_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x2)))
-svfloat64x2_t svld1_vnum_f64_x2(svcount_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x2)))
-svint64x2_t svld1_vnum_s64_x2(svcount_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x2)))
-svuint16x2_t svld1_vnum_u16_x2(svcount_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x2)))
-svbfloat16x2_t svld1_vnum_bf16_x2(svcount_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x2)))
-svfloat16x2_t svld1_vnum_f16_x2(svcount_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x2)))
-svint16x2_t svld1_vnum_s16_x2(svcount_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x2)))
-svuint32x2_t svld1_vnum_u32_x2(svcount_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x2)))
-svfloat32x2_t svld1_vnum_f32_x2(svcount_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x2)))
-svint32x2_t svld1_vnum_s32_x2(svcount_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x4)))
-svuint8x4_t svld1_vnum_u8_x4(svcount_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x4)))
-svint8x4_t svld1_vnum_s8_x4(svcount_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x4)))
-svuint64x4_t svld1_vnum_u64_x4(svcount_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x4)))
-svfloat64x4_t svld1_vnum_f64_x4(svcount_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x4)))
-svint64x4_t svld1_vnum_s64_x4(svcount_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x4)))
-svuint16x4_t svld1_vnum_u16_x4(svcount_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x4)))
-svbfloat16x4_t svld1_vnum_bf16_x4(svcount_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x4)))
-svfloat16x4_t svld1_vnum_f16_x4(svcount_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x4)))
-svint16x4_t svld1_vnum_s16_x4(svcount_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x4)))
-svuint32x4_t svld1_vnum_u32_x4(svcount_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x4)))
-svfloat32x4_t svld1_vnum_f32_x4(svcount_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x4)))
-svint32x4_t svld1_vnum_s32_x4(svcount_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x2)))
-svuint8x2_t svldnt1_u8_x2(svcount_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x2)))
-svint8x2_t svldnt1_s8_x2(svcount_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x2)))
-svuint64x2_t svldnt1_u64_x2(svcount_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x2)))
-svfloat64x2_t svldnt1_f64_x2(svcount_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x2)))
-svint64x2_t svldnt1_s64_x2(svcount_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x2)))
-svuint16x2_t svldnt1_u16_x2(svcount_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x2)))
-svbfloat16x2_t svldnt1_bf16_x2(svcount_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x2)))
-svfloat16x2_t svldnt1_f16_x2(svcount_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x2)))
-svint16x2_t svldnt1_s16_x2(svcount_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x2)))
-svuint32x2_t svldnt1_u32_x2(svcount_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x2)))
-svfloat32x2_t svldnt1_f32_x2(svcount_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x2)))
-svint32x2_t svldnt1_s32_x2(svcount_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x4)))
-svuint8x4_t svldnt1_u8_x4(svcount_t, uint8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x4)))
-svint8x4_t svldnt1_s8_x4(svcount_t, int8_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x4)))
-svuint64x4_t svldnt1_u64_x4(svcount_t, uint64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x4)))
-svfloat64x4_t svldnt1_f64_x4(svcount_t, float64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x4)))
-svint64x4_t svldnt1_s64_x4(svcount_t, int64_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x4)))
-svuint16x4_t svldnt1_u16_x4(svcount_t, uint16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x4)))
-svbfloat16x4_t svldnt1_bf16_x4(svcount_t, bfloat16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x4)))
-svfloat16x4_t svldnt1_f16_x4(svcount_t, float16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x4)))
-svint16x4_t svldnt1_s16_x4(svcount_t, int16_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x4)))
-svuint32x4_t svldnt1_u32_x4(svcount_t, uint32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x4)))
-svfloat32x4_t svldnt1_f32_x4(svcount_t, float32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x4)))
-svint32x4_t svldnt1_s32_x4(svcount_t, int32_t const *);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x2)))
-svuint8x2_t svldnt1_vnum_u8_x2(svcount_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x2)))
-svint8x2_t svldnt1_vnum_s8_x2(svcount_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x2)))
-svuint64x2_t svldnt1_vnum_u64_x2(svcount_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x2)))
-svfloat64x2_t svldnt1_vnum_f64_x2(svcount_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x2)))
-svint64x2_t svldnt1_vnum_s64_x2(svcount_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x2)))
-svuint16x2_t svldnt1_vnum_u16_x2(svcount_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x2)))
-svbfloat16x2_t svldnt1_vnum_bf16_x2(svcount_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x2)))
-svfloat16x2_t svldnt1_vnum_f16_x2(svcount_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x2)))
-svint16x2_t svldnt1_vnum_s16_x2(svcount_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x2)))
-svuint32x2_t svldnt1_vnum_u32_x2(svcount_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x2)))
-svfloat32x2_t svldnt1_vnum_f32_x2(svcount_t, float32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x2)))
-svint32x2_t svldnt1_vnum_s32_x2(svcount_t, int32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x4)))
-svuint8x4_t svldnt1_vnum_u8_x4(svcount_t, uint8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x4)))
-svint8x4_t svldnt1_vnum_s8_x4(svcount_t, int8_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x4)))
-svuint64x4_t svldnt1_vnum_u64_x4(svcount_t, uint64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x4)))
-svfloat64x4_t svldnt1_vnum_f64_x4(svcount_t, float64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x4)))
-svint64x4_t svldnt1_vnum_s64_x4(svcount_t, int64_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x4)))
-svuint16x4_t svldnt1_vnum_u16_x4(svcount_t, uint16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x4)))
-svbfloat16x4_t svldnt1_vnum_bf16_x4(svcount_t, bfloat16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x4)))
-svfloat16x4_t svldnt1_vnum_f16_x4(svcount_t, float16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x4)))
-svint16x4_t svldnt1_vnum_s16_x4(svcount_t, int16_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x4)))
-svuint32x4_t svldnt1_vnum_u32_x4(svcount_t, uint32_t const *, int64_t);
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4)))
-svfloat32x4_t svldnt1_vnum_f32_x4(svcount_t, float32_t const *, int64_t);
-__ai
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4))) -svint32x4_t svldnt1_vnum_s32_x4(svcount_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c8))) -svbool_t svpext_lane_c8(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c32))) -svbool_t svpext_lane_c32(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c64))) -svbool_t svpext_lane_c64(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c16))) -svbool_t svpext_lane_c16(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c8_x2))) -svboolx2_t svpext_lane_c8_x2(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c32_x2))) -svboolx2_t svpext_lane_c32_x2(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c64_x2))) -svboolx2_t svpext_lane_c64_x2(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c16_x2))) -svboolx2_t svpext_lane_c16_x2(svcount_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_c))) -svcount_t svpfalse_c(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c16))) -svcount_t svpsel_lane_c16(svcount_t, svbool_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c32))) -svcount_t svpsel_lane_c32(svcount_t, svbool_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c64))) -svcount_t svpsel_lane_c64(svcount_t, svbool_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c8))) -svcount_t svpsel_lane_c8(svcount_t, svbool_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c8))) -svcount_t svptrue_c8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c32))) -svcount_t svptrue_c32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c64))) -svcount_t svptrue_c64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c16))) -svcount_t svptrue_c16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) -svint16_t svqrshrn_n_s16_s32_x2(svint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) -svuint16_t svqrshrn_n_u16_u32_x2(svuint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) -svuint16_t svqrshrun_n_u16_s32_x2(svint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) -svuint8_t svrevd_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) -svuint32_t svrevd_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) -svuint64_t svrevd_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) -svuint16_t svrevd_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) -svbfloat16_t svrevd_bf16_m(svbfloat16_t, svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) -svint8_t svrevd_s8_m(svint8_t, svbool_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) -svfloat64_t svrevd_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) -svfloat32_t svrevd_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) -svfloat16_t svrevd_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) -svint32_t svrevd_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) -svint64_t svrevd_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) -svint16_t svrevd_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) -svuint8_t svrevd_u8_x(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) -svuint32_t svrevd_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) -svuint64_t svrevd_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) -svuint16_t svrevd_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) -svbfloat16_t svrevd_bf16_x(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) -svint8_t svrevd_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) -svfloat64_t svrevd_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) -svfloat32_t svrevd_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) -svfloat16_t svrevd_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) -svint32_t svrevd_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) -svint64_t svrevd_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) -svint16_t svrevd_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) -svuint8_t svrevd_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) -svuint32_t svrevd_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) -svuint64_t svrevd_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) -svuint16_t svrevd_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) -svbfloat16_t svrevd_bf16_z(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) -svint8_t svrevd_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) -svfloat64_t svrevd_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) -svfloat32_t svrevd_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) -svfloat16_t svrevd_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) -svint32_t svrevd_s32_z(svbool_t, 
svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) -svint64_t svrevd_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) -svint16_t svrevd_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) -svboolx2_t svset2_b(svboolx2_t, uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) -svboolx4_t svset4_b(svboolx4_t, uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x2))) -void svst1_u8_x2(svcount_t, uint8_t *, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x2))) -void svst1_s8_x2(svcount_t, int8_t *, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x2))) -void svst1_u64_x2(svcount_t, uint64_t *, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x2))) -void svst1_f64_x2(svcount_t, float64_t *, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x2))) -void svst1_s64_x2(svcount_t, int64_t *, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x2))) -void svst1_u16_x2(svcount_t, uint16_t *, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x2))) -void svst1_bf16_x2(svcount_t, bfloat16_t *, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x2))) -void svst1_f16_x2(svcount_t, float16_t *, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x2))) -void svst1_s16_x2(svcount_t, int16_t *, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x2))) -void svst1_u32_x2(svcount_t, uint32_t *, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x2))) -void svst1_f32_x2(svcount_t, float32_t *, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x2))) -void svst1_s32_x2(svcount_t, int32_t *, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x4))) -void svst1_u8_x4(svcount_t, uint8_t *, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x4))) -void svst1_s8_x4(svcount_t, int8_t *, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x4))) -void svst1_u64_x4(svcount_t, uint64_t *, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x4))) -void svst1_f64_x4(svcount_t, float64_t *, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x4))) -void svst1_s64_x4(svcount_t, int64_t *, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x4))) -void svst1_u16_x4(svcount_t, uint16_t *, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x4))) -void svst1_bf16_x4(svcount_t, bfloat16_t *, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x4))) -void svst1_f16_x4(svcount_t, float16_t *, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x4))) -void svst1_s16_x4(svcount_t, int16_t *, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x4))) -void svst1_u32_x4(svcount_t, uint32_t *, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x4))) -void 
svst1_f32_x4(svcount_t, float32_t *, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x4))) -void svst1_s32_x4(svcount_t, int32_t *, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x2))) -void svst1_vnum_u8_x2(svcount_t, uint8_t *, int64_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x2))) -void svst1_vnum_s8_x2(svcount_t, int8_t *, int64_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x2))) -void svst1_vnum_u64_x2(svcount_t, uint64_t *, int64_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x2))) -void svst1_vnum_f64_x2(svcount_t, float64_t *, int64_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x2))) -void svst1_vnum_s64_x2(svcount_t, int64_t *, int64_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x2))) -void svst1_vnum_u16_x2(svcount_t, uint16_t *, int64_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x2))) -void svst1_vnum_bf16_x2(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x2))) -void svst1_vnum_f16_x2(svcount_t, float16_t *, int64_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x2))) -void svst1_vnum_s16_x2(svcount_t, int16_t *, int64_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x2))) -void svst1_vnum_u32_x2(svcount_t, uint32_t *, int64_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x2))) -void svst1_vnum_f32_x2(svcount_t, float32_t *, int64_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x2))) -void svst1_vnum_s32_x2(svcount_t, int32_t *, int64_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x4))) -void svst1_vnum_u8_x4(svcount_t, uint8_t *, int64_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x4))) -void svst1_vnum_s8_x4(svcount_t, int8_t *, int64_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x4))) -void svst1_vnum_u64_x4(svcount_t, uint64_t *, int64_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x4))) -void svst1_vnum_f64_x4(svcount_t, float64_t *, int64_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x4))) -void svst1_vnum_s64_x4(svcount_t, int64_t *, int64_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x4))) -void svst1_vnum_u16_x4(svcount_t, uint16_t *, int64_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x4))) -void svst1_vnum_bf16_x4(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x4))) -void svst1_vnum_f16_x4(svcount_t, float16_t *, int64_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x4))) -void svst1_vnum_s16_x4(svcount_t, int16_t *, int64_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x4))) -void svst1_vnum_u32_x4(svcount_t, uint32_t *, int64_t, svuint32x4_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x4))) -void svst1_vnum_f32_x4(svcount_t, float32_t *, int64_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x4))) -void svst1_vnum_s32_x4(svcount_t, int32_t *, int64_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x2))) -void svstnt1_u8_x2(svcount_t, uint8_t *, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x2))) -void svstnt1_s8_x2(svcount_t, int8_t *, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x2))) -void svstnt1_u64_x2(svcount_t, uint64_t *, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x2))) -void svstnt1_f64_x2(svcount_t, float64_t *, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x2))) -void svstnt1_s64_x2(svcount_t, int64_t *, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x2))) -void svstnt1_u16_x2(svcount_t, uint16_t *, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x2))) -void svstnt1_bf16_x2(svcount_t, bfloat16_t *, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x2))) -void svstnt1_f16_x2(svcount_t, float16_t *, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x2))) -void svstnt1_s16_x2(svcount_t, int16_t *, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x2))) -void svstnt1_u32_x2(svcount_t, uint32_t *, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x2))) -void svstnt1_f32_x2(svcount_t, float32_t *, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x2))) -void svstnt1_s32_x2(svcount_t, int32_t *, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x4))) -void svstnt1_u8_x4(svcount_t, uint8_t *, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x4))) -void svstnt1_s8_x4(svcount_t, int8_t *, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x4))) -void svstnt1_u64_x4(svcount_t, uint64_t *, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x4))) -void svstnt1_f64_x4(svcount_t, float64_t *, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x4))) -void svstnt1_s64_x4(svcount_t, int64_t *, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x4))) -void svstnt1_u16_x4(svcount_t, uint16_t *, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x4))) -void svstnt1_bf16_x4(svcount_t, bfloat16_t *, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x4))) -void svstnt1_f16_x4(svcount_t, float16_t *, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x4))) -void svstnt1_s16_x4(svcount_t, int16_t *, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x4))) -void svstnt1_u32_x4(svcount_t, uint32_t *, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x4))) -void svstnt1_f32_x4(svcount_t, float32_t *, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x4))) -void 
svstnt1_s32_x4(svcount_t, int32_t *, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x2))) -void svstnt1_vnum_u8_x2(svcount_t, uint8_t *, int64_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x2))) -void svstnt1_vnum_s8_x2(svcount_t, int8_t *, int64_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x2))) -void svstnt1_vnum_u64_x2(svcount_t, uint64_t *, int64_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x2))) -void svstnt1_vnum_f64_x2(svcount_t, float64_t *, int64_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x2))) -void svstnt1_vnum_s64_x2(svcount_t, int64_t *, int64_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x2))) -void svstnt1_vnum_u16_x2(svcount_t, uint16_t *, int64_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x2))) -void svstnt1_vnum_bf16_x2(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x2))) -void svstnt1_vnum_f16_x2(svcount_t, float16_t *, int64_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x2))) -void svstnt1_vnum_s16_x2(svcount_t, int16_t *, int64_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x2))) -void svstnt1_vnum_u32_x2(svcount_t, uint32_t *, int64_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x2))) -void svstnt1_vnum_f32_x2(svcount_t, float32_t *, int64_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x2))) -void svstnt1_vnum_s32_x2(svcount_t, int32_t *, int64_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x4))) -void svstnt1_vnum_u8_x4(svcount_t, uint8_t *, int64_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x4))) -void svstnt1_vnum_s8_x4(svcount_t, int8_t *, int64_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x4))) -void svstnt1_vnum_u64_x4(svcount_t, uint64_t *, int64_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x4))) -void svstnt1_vnum_f64_x4(svcount_t, float64_t *, int64_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x4))) -void svstnt1_vnum_s64_x4(svcount_t, int64_t *, int64_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x4))) -void svstnt1_vnum_u16_x4(svcount_t, uint16_t *, int64_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x4))) -void svstnt1_vnum_bf16_x4(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x4))) -void svstnt1_vnum_f16_x4(svcount_t, float16_t *, int64_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x4))) -void svstnt1_vnum_s16_x4(svcount_t, int16_t *, int64_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x4))) -void svstnt1_vnum_u32_x4(svcount_t, uint32_t *, int64_t, svuint32x4_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x4))) -void svstnt1_vnum_f32_x4(svcount_t, float32_t *, int64_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x4))) -void svstnt1_vnum_s32_x4(svcount_t, int32_t *, int64_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_b))) -svboolx2_t svundef2_b(); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_b))) -svboolx4_t svundef4_b(); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_s64))) -svcount_t svwhilege_c8_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_s64))) -svcount_t svwhilege_c32_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_s64))) -svcount_t svwhilege_c64_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_s64))) -svcount_t svwhilege_c16_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_u64))) -svcount_t svwhilege_c8_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_u64))) -svcount_t svwhilege_c32_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_u64))) -svcount_t svwhilege_c64_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_u64))) -svcount_t svwhilege_c16_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64_x2))) -svboolx2_t svwhilege_b8_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64_x2))) -svboolx2_t svwhilege_b32_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64_x2))) -svboolx2_t svwhilege_b64_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64_x2))) -svboolx2_t svwhilege_b16_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64_x2))) -svboolx2_t svwhilege_b8_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64_x2))) -svboolx2_t svwhilege_b32_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64_x2))) -svboolx2_t svwhilege_b64_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64_x2))) -svboolx2_t svwhilege_b16_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_s64))) -svcount_t svwhilegt_c8_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_s64))) -svcount_t svwhilegt_c32_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_s64))) -svcount_t svwhilegt_c64_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_s64))) -svcount_t svwhilegt_c16_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_u64))) -svcount_t svwhilegt_c8_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_u64))) -svcount_t svwhilegt_c32_u64(uint64_t, 
uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_u64))) -svcount_t svwhilegt_c64_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_u64))) -svcount_t svwhilegt_c16_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64_x2))) -svboolx2_t svwhilegt_b8_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64_x2))) -svboolx2_t svwhilegt_b32_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64_x2))) -svboolx2_t svwhilegt_b64_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64_x2))) -svboolx2_t svwhilegt_b16_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64_x2))) -svboolx2_t svwhilegt_b8_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64_x2))) -svboolx2_t svwhilegt_b32_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64_x2))) -svboolx2_t svwhilegt_b64_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64_x2))) -svboolx2_t svwhilegt_b16_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_s64))) -svcount_t svwhilele_c8_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_s64))) -svcount_t svwhilele_c32_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_s64))) -svcount_t svwhilele_c64_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_s64))) -svcount_t svwhilele_c16_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_u64))) -svcount_t svwhilele_c8_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_u64))) -svcount_t svwhilele_c32_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_u64))) -svcount_t svwhilele_c64_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_u64))) -svcount_t svwhilele_c16_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64_x2))) -svboolx2_t svwhilele_b8_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64_x2))) -svboolx2_t svwhilele_b32_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64_x2))) -svboolx2_t svwhilele_b64_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64_x2))) -svboolx2_t svwhilele_b16_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64_x2))) -svboolx2_t svwhilele_b8_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64_x2))) -svboolx2_t svwhilele_b32_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64_x2))) -svboolx2_t svwhilele_b64_u64_x2(uint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64_x2))) -svboolx2_t svwhilele_b16_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_u64))) -svcount_t svwhilelt_c8_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_u64))) -svcount_t svwhilelt_c32_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_u64))) -svcount_t svwhilelt_c64_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_u64))) -svcount_t svwhilelt_c16_u64(uint64_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_s64))) -svcount_t svwhilelt_c8_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_s64))) -svcount_t svwhilelt_c32_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_s64))) -svcount_t svwhilelt_c64_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_s64))) -svcount_t svwhilelt_c16_s64(int64_t, int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64_x2))) -svboolx2_t svwhilelt_b8_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64_x2))) -svboolx2_t svwhilelt_b32_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64_x2))) -svboolx2_t svwhilelt_b64_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64_x2))) -svboolx2_t svwhilelt_b16_u64_x2(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64_x2))) -svboolx2_t svwhilelt_b8_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64_x2))) -svboolx2_t svwhilelt_b32_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x2))) -svboolx2_t svwhilelt_b64_s64_x2(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2))) -svboolx2_t svwhilelt_b16_s64_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32))) -svfloat32_t svbfmlslb(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32))) -svfloat32_t svbfmlslb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_f32))) -svfloat32_t svbfmlslt(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_lane_f32))) -svfloat32_t svbfmlslt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64))) -svfloat64_t svclamp(svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32))) -svfloat32_t svclamp(svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16))) -svfloat16_t svclamp(svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) -svint8_t svclamp(svint8_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) -svint32_t svclamp(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) -svint64_t svclamp(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) -svint16_t svclamp(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) -svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) -svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) -svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) -svuint16_t svclamp(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b))) -svboolx2_t svcreate2(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b))) -svboolx4_t svcreate4(svbool_t, svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_f32_f16))) -svfloat32_t svdot(svfloat32_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32_s16))) -svint32_t svdot(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32_u16))) -svuint32_t svdot(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_f32_f16))) -svfloat32_t svdot_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32_s16))) -svint32_t svdot_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32_u16))) -svuint32_t svdot_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_b))) -svbool_t svget2(svboolx2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_b))) -svbool_t svget4(svboolx4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x2))) -svuint8x2_t svld1_x2(svcount_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x2))) -svint8x2_t svld1_x2(svcount_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x2))) -svuint64x2_t svld1_x2(svcount_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x2))) -svfloat64x2_t svld1_x2(svcount_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x2))) -svint64x2_t svld1_x2(svcount_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x2))) -svuint16x2_t svld1_x2(svcount_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x2))) -svbfloat16x2_t svld1_x2(svcount_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x2))) -svfloat16x2_t svld1_x2(svcount_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x2))) -svint16x2_t svld1_x2(svcount_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x2))) -svuint32x2_t svld1_x2(svcount_t, 
uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x2))) -svfloat32x2_t svld1_x2(svcount_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x2))) -svint32x2_t svld1_x2(svcount_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x4))) -svuint8x4_t svld1_x4(svcount_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x4))) -svint8x4_t svld1_x4(svcount_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x4))) -svuint64x4_t svld1_x4(svcount_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x4))) -svfloat64x4_t svld1_x4(svcount_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x4))) -svint64x4_t svld1_x4(svcount_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x4))) -svuint16x4_t svld1_x4(svcount_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x4))) -svbfloat16x4_t svld1_x4(svcount_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x4))) -svfloat16x4_t svld1_x4(svcount_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x4))) -svint16x4_t svld1_x4(svcount_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x4))) -svuint32x4_t svld1_x4(svcount_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x4))) -svfloat32x4_t svld1_x4(svcount_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x4))) -svint32x4_t svld1_x4(svcount_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x2))) -svuint8x2_t svld1_vnum_x2(svcount_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x2))) -svint8x2_t svld1_vnum_x2(svcount_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x2))) -svuint64x2_t svld1_vnum_x2(svcount_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x2))) -svfloat64x2_t svld1_vnum_x2(svcount_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x2))) -svint64x2_t svld1_vnum_x2(svcount_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x2))) -svuint16x2_t svld1_vnum_x2(svcount_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x2))) -svbfloat16x2_t svld1_vnum_x2(svcount_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x2))) -svfloat16x2_t svld1_vnum_x2(svcount_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x2))) -svint16x2_t svld1_vnum_x2(svcount_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x2))) -svuint32x2_t svld1_vnum_x2(svcount_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x2))) -svfloat32x2_t svld1_vnum_x2(svcount_t, float32_t const *, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x2))) -svint32x2_t svld1_vnum_x2(svcount_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x4))) -svuint8x4_t svld1_vnum_x4(svcount_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x4))) -svint8x4_t svld1_vnum_x4(svcount_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x4))) -svuint64x4_t svld1_vnum_x4(svcount_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x4))) -svfloat64x4_t svld1_vnum_x4(svcount_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x4))) -svint64x4_t svld1_vnum_x4(svcount_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x4))) -svuint16x4_t svld1_vnum_x4(svcount_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x4))) -svbfloat16x4_t svld1_vnum_x4(svcount_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x4))) -svfloat16x4_t svld1_vnum_x4(svcount_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x4))) -svint16x4_t svld1_vnum_x4(svcount_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x4))) -svuint32x4_t svld1_vnum_x4(svcount_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x4))) -svfloat32x4_t svld1_vnum_x4(svcount_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x4))) -svint32x4_t svld1_vnum_x4(svcount_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x2))) -svuint8x2_t svldnt1_x2(svcount_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x2))) -svint8x2_t svldnt1_x2(svcount_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x2))) -svuint64x2_t svldnt1_x2(svcount_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x2))) -svfloat64x2_t svldnt1_x2(svcount_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x2))) -svint64x2_t svldnt1_x2(svcount_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x2))) -svuint16x2_t svldnt1_x2(svcount_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x2))) -svbfloat16x2_t svldnt1_x2(svcount_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x2))) -svfloat16x2_t svldnt1_x2(svcount_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x2))) -svint16x2_t svldnt1_x2(svcount_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x2))) -svuint32x2_t svldnt1_x2(svcount_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x2))) -svfloat32x2_t svldnt1_x2(svcount_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x2))) -svint32x2_t svldnt1_x2(svcount_t, int32_t const *); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x4))) -svuint8x4_t svldnt1_x4(svcount_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x4))) -svint8x4_t svldnt1_x4(svcount_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x4))) -svuint64x4_t svldnt1_x4(svcount_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x4))) -svfloat64x4_t svldnt1_x4(svcount_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x4))) -svint64x4_t svldnt1_x4(svcount_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x4))) -svuint16x4_t svldnt1_x4(svcount_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x4))) -svbfloat16x4_t svldnt1_x4(svcount_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x4))) -svfloat16x4_t svldnt1_x4(svcount_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x4))) -svint16x4_t svldnt1_x4(svcount_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x4))) -svuint32x4_t svldnt1_x4(svcount_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x4))) -svfloat32x4_t svldnt1_x4(svcount_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x4))) -svint32x4_t svldnt1_x4(svcount_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x2))) -svuint8x2_t svldnt1_vnum_x2(svcount_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x2))) -svint8x2_t svldnt1_vnum_x2(svcount_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x2))) -svuint64x2_t svldnt1_vnum_x2(svcount_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x2))) -svfloat64x2_t svldnt1_vnum_x2(svcount_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x2))) -svint64x2_t svldnt1_vnum_x2(svcount_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x2))) -svuint16x2_t svldnt1_vnum_x2(svcount_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x2))) -svbfloat16x2_t svldnt1_vnum_x2(svcount_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x2))) -svfloat16x2_t svldnt1_vnum_x2(svcount_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x2))) -svint16x2_t svldnt1_vnum_x2(svcount_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x2))) -svuint32x2_t svldnt1_vnum_x2(svcount_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x2))) -svfloat32x2_t svldnt1_vnum_x2(svcount_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x2))) -svint32x2_t svldnt1_vnum_x2(svcount_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x4))) 
-svuint8x4_t svldnt1_vnum_x4(svcount_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x4))) -svint8x4_t svldnt1_vnum_x4(svcount_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x4))) -svuint64x4_t svldnt1_vnum_x4(svcount_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x4))) -svfloat64x4_t svldnt1_vnum_x4(svcount_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x4))) -svint64x4_t svldnt1_vnum_x4(svcount_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x4))) -svuint16x4_t svldnt1_vnum_x4(svcount_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x4))) -svbfloat16x4_t svldnt1_vnum_x4(svcount_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x4))) -svfloat16x4_t svldnt1_vnum_x4(svcount_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x4))) -svint16x4_t svldnt1_vnum_x4(svcount_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x4))) -svuint32x4_t svldnt1_vnum_x4(svcount_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4))) -svfloat32x4_t svldnt1_vnum_x4(svcount_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4))) -svint32x4_t svldnt1_vnum_x4(svcount_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) -svint16_t svqrshrn_s16(svint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) -svuint16_t svqrshrn_u16(svuint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) -svuint16_t svqrshrun_u16(svint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) -svuint8_t svrevd_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) -svuint32_t svrevd_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) -svuint64_t svrevd_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) -svuint16_t svrevd_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) -svbfloat16_t svrevd_m(svbfloat16_t, svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) -svint8_t svrevd_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) -svfloat64_t svrevd_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) -svfloat32_t svrevd_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) -svfloat16_t svrevd_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) -svint32_t svrevd_m(svint32_t, svbool_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) -svint64_t svrevd_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) -svint16_t svrevd_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) -svuint8_t svrevd_x(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) -svuint32_t svrevd_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) -svuint64_t svrevd_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) -svuint16_t svrevd_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) -svbfloat16_t svrevd_x(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) -svint8_t svrevd_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) -svfloat64_t svrevd_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) -svfloat32_t svrevd_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) -svfloat16_t svrevd_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) -svint32_t svrevd_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) -svint64_t svrevd_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) -svint16_t svrevd_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) -svuint8_t svrevd_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) -svuint32_t svrevd_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) -svuint64_t svrevd_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) -svuint16_t svrevd_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) -svbfloat16_t svrevd_z(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) -svint8_t svrevd_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) -svfloat64_t svrevd_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) -svfloat32_t svrevd_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) -svfloat16_t svrevd_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) -svint32_t svrevd_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) -svint64_t svrevd_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) -svint16_t svrevd_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) -svboolx2_t svset2(svboolx2_t, uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) -svboolx4_t svset4(svboolx4_t, uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x2))) -void svst1(svcount_t, uint8_t *, svuint8x2_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x2))) -void svst1(svcount_t, int8_t *, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x2))) -void svst1(svcount_t, uint64_t *, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x2))) -void svst1(svcount_t, float64_t *, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x2))) -void svst1(svcount_t, int64_t *, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x2))) -void svst1(svcount_t, uint16_t *, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x2))) -void svst1(svcount_t, bfloat16_t *, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x2))) -void svst1(svcount_t, float16_t *, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x2))) -void svst1(svcount_t, int16_t *, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x2))) -void svst1(svcount_t, uint32_t *, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x2))) -void svst1(svcount_t, float32_t *, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x2))) -void svst1(svcount_t, int32_t *, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x4))) -void svst1(svcount_t, uint8_t *, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x4))) -void svst1(svcount_t, int8_t *, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x4))) -void svst1(svcount_t, uint64_t *, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x4))) -void svst1(svcount_t, float64_t *, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x4))) -void svst1(svcount_t, int64_t *, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x4))) -void svst1(svcount_t, uint16_t *, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x4))) -void svst1(svcount_t, bfloat16_t *, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x4))) -void svst1(svcount_t, float16_t *, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x4))) -void svst1(svcount_t, int16_t *, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x4))) -void svst1(svcount_t, uint32_t *, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x4))) -void svst1(svcount_t, float32_t *, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x4))) -void svst1(svcount_t, int32_t *, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x2))) -void svst1_vnum(svcount_t, uint8_t *, int64_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x2))) -void svst1_vnum(svcount_t, int8_t *, int64_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x2))) -void svst1_vnum(svcount_t, uint64_t *, int64_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x2))) -void svst1_vnum(svcount_t, float64_t *, int64_t, svfloat64x2_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x2))) -void svst1_vnum(svcount_t, int64_t *, int64_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x2))) -void svst1_vnum(svcount_t, uint16_t *, int64_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x2))) -void svst1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x2))) -void svst1_vnum(svcount_t, float16_t *, int64_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x2))) -void svst1_vnum(svcount_t, int16_t *, int64_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x2))) -void svst1_vnum(svcount_t, uint32_t *, int64_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x2))) -void svst1_vnum(svcount_t, float32_t *, int64_t, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x2))) -void svst1_vnum(svcount_t, int32_t *, int64_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x4))) -void svst1_vnum(svcount_t, uint8_t *, int64_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x4))) -void svst1_vnum(svcount_t, int8_t *, int64_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x4))) -void svst1_vnum(svcount_t, uint64_t *, int64_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x4))) -void svst1_vnum(svcount_t, float64_t *, int64_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x4))) -void svst1_vnum(svcount_t, int64_t *, int64_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x4))) -void svst1_vnum(svcount_t, uint16_t *, int64_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x4))) -void svst1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x4))) -void svst1_vnum(svcount_t, float16_t *, int64_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x4))) -void svst1_vnum(svcount_t, int16_t *, int64_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x4))) -void svst1_vnum(svcount_t, uint32_t *, int64_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x4))) -void svst1_vnum(svcount_t, float32_t *, int64_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x4))) -void svst1_vnum(svcount_t, int32_t *, int64_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x2))) -void svstnt1(svcount_t, uint8_t *, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x2))) -void svstnt1(svcount_t, int8_t *, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x2))) -void svstnt1(svcount_t, uint64_t *, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x2))) -void svstnt1(svcount_t, float64_t *, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x2))) -void 
svstnt1(svcount_t, int64_t *, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x2))) -void svstnt1(svcount_t, uint16_t *, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x2))) -void svstnt1(svcount_t, bfloat16_t *, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x2))) -void svstnt1(svcount_t, float16_t *, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x2))) -void svstnt1(svcount_t, int16_t *, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x2))) -void svstnt1(svcount_t, uint32_t *, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x2))) -void svstnt1(svcount_t, float32_t *, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x2))) -void svstnt1(svcount_t, int32_t *, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x4))) -void svstnt1(svcount_t, uint8_t *, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x4))) -void svstnt1(svcount_t, int8_t *, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x4))) -void svstnt1(svcount_t, uint64_t *, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x4))) -void svstnt1(svcount_t, float64_t *, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x4))) -void svstnt1(svcount_t, int64_t *, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x4))) -void svstnt1(svcount_t, uint16_t *, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x4))) -void svstnt1(svcount_t, bfloat16_t *, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x4))) -void svstnt1(svcount_t, float16_t *, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x4))) -void svstnt1(svcount_t, int16_t *, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x4))) -void svstnt1(svcount_t, uint32_t *, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x4))) -void svstnt1(svcount_t, float32_t *, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x4))) -void svstnt1(svcount_t, int32_t *, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x2))) -void svstnt1_vnum(svcount_t, uint8_t *, int64_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x2))) -void svstnt1_vnum(svcount_t, int8_t *, int64_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x2))) -void svstnt1_vnum(svcount_t, uint64_t *, int64_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x2))) -void svstnt1_vnum(svcount_t, float64_t *, int64_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x2))) -void svstnt1_vnum(svcount_t, int64_t *, int64_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x2))) -void svstnt1_vnum(svcount_t, uint16_t *, int64_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x2))) -void 
svstnt1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x2))) -void svstnt1_vnum(svcount_t, float16_t *, int64_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x2))) -void svstnt1_vnum(svcount_t, int16_t *, int64_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x2))) -void svstnt1_vnum(svcount_t, uint32_t *, int64_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x2))) -void svstnt1_vnum(svcount_t, float32_t *, int64_t, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x2))) -void svstnt1_vnum(svcount_t, int32_t *, int64_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x4))) -void svstnt1_vnum(svcount_t, uint8_t *, int64_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x4))) -void svstnt1_vnum(svcount_t, int8_t *, int64_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x4))) -void svstnt1_vnum(svcount_t, uint64_t *, int64_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x4))) -void svstnt1_vnum(svcount_t, float64_t *, int64_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x4))) -void svstnt1_vnum(svcount_t, int64_t *, int64_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x4))) -void svstnt1_vnum(svcount_t, uint16_t *, int64_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x4))) -void svstnt1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x4))) -void svstnt1_vnum(svcount_t, float16_t *, int64_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x4))) -void svstnt1_vnum(svcount_t, int16_t *, int64_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x4))) -void svstnt1_vnum(svcount_t, uint32_t *, int64_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x4))) -void svstnt1_vnum(svcount_t, float32_t *, int64_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x4))) -void svstnt1_vnum(svcount_t, int32_t *, int64_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_s64))) -svcount_t svwhilege_c8(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_s64))) -svcount_t svwhilege_c32(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_s64))) -svcount_t svwhilege_c64(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_s64))) -svcount_t svwhilege_c16(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_u64))) -svcount_t svwhilege_c8(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_u64))) -svcount_t svwhilege_c32(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_u64))) -svcount_t 
svwhilege_c64(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_u64))) -svcount_t svwhilege_c16(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64_x2))) -svboolx2_t svwhilege_b8_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64_x2))) -svboolx2_t svwhilege_b32_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64_x2))) -svboolx2_t svwhilege_b64_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64_x2))) -svboolx2_t svwhilege_b16_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64_x2))) -svboolx2_t svwhilege_b8_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64_x2))) -svboolx2_t svwhilege_b32_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64_x2))) -svboolx2_t svwhilege_b64_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64_x2))) -svboolx2_t svwhilege_b16_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_s64))) -svcount_t svwhilegt_c8(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_s64))) -svcount_t svwhilegt_c32(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_s64))) -svcount_t svwhilegt_c64(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_s64))) -svcount_t svwhilegt_c16(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_u64))) -svcount_t svwhilegt_c8(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_u64))) -svcount_t svwhilegt_c32(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_u64))) -svcount_t svwhilegt_c64(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_u64))) -svcount_t svwhilegt_c16(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64_x2))) -svboolx2_t svwhilegt_b8_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64_x2))) -svboolx2_t svwhilegt_b32_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64_x2))) -svboolx2_t svwhilegt_b64_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64_x2))) -svboolx2_t svwhilegt_b16_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64_x2))) -svboolx2_t svwhilegt_b8_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64_x2))) -svboolx2_t svwhilegt_b32_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64_x2))) -svboolx2_t svwhilegt_b64_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64_x2))) -svboolx2_t svwhilegt_b16_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_s64))) -svcount_t svwhilele_c8(int64_t, 
int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_s64))) -svcount_t svwhilele_c32(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_s64))) -svcount_t svwhilele_c64(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_s64))) -svcount_t svwhilele_c16(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_u64))) -svcount_t svwhilele_c8(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_u64))) -svcount_t svwhilele_c32(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_u64))) -svcount_t svwhilele_c64(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_u64))) -svcount_t svwhilele_c16(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64_x2))) -svboolx2_t svwhilele_b8_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64_x2))) -svboolx2_t svwhilele_b32_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64_x2))) -svboolx2_t svwhilele_b64_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64_x2))) -svboolx2_t svwhilele_b16_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64_x2))) -svboolx2_t svwhilele_b8_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64_x2))) -svboolx2_t svwhilele_b32_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64_x2))) -svboolx2_t svwhilele_b64_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64_x2))) -svboolx2_t svwhilele_b16_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_u64))) -svcount_t svwhilelt_c8(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_u64))) -svcount_t svwhilelt_c32(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_u64))) -svcount_t svwhilelt_c64(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_u64))) -svcount_t svwhilelt_c16(uint64_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_s64))) -svcount_t svwhilelt_c8(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_s64))) -svcount_t svwhilelt_c32(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_s64))) -svcount_t svwhilelt_c64(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_s64))) -svcount_t svwhilelt_c16(int64_t, int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64_x2))) -svboolx2_t svwhilelt_b8_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64_x2))) -svboolx2_t svwhilelt_b32_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64_x2))) -svboolx2_t 
svwhilelt_b64_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64_x2))) -svboolx2_t svwhilelt_b16_x2(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64_x2))) -svboolx2_t svwhilelt_b8_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64_x2))) -svboolx2_t svwhilelt_b32_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x2))) -svboolx2_t svwhilelt_b64_x2(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2))) -svboolx2_t svwhilelt_b16_x2(int64_t, int64_t); #define svcvtnt_bf16_x svcvtnt_bf16_m #define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m #define svcvtnt_f16_x svcvtnt_f16_m diff --git a/lib/include/arm_vector_types.h b/lib/include/arm_vector_types.h index b0dd66b07f..8e79d39a60 100644 --- a/lib/include/arm_vector_types.h +++ b/lib/include/arm_vector_types.h @@ -16,7 +16,7 @@ #define __ARM_NEON_TYPES_H typedef float float32_t; typedef __fp16 float16_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef double float64_t; #endif @@ -40,7 +40,7 @@ typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; #endif @@ -125,7 +125,7 @@ typedef struct float32x4x2_t { float32x4_t val[2]; } float32x4x2_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef struct float64x1x2_t { float64x1_t val[2]; } float64x1x2_t; @@ -215,7 +215,7 @@ typedef struct float32x4x3_t { float32x4_t val[3]; } float32x4x3_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef struct float64x1x3_t { float64x1_t val[3]; } float64x1x3_t; @@ -305,7 +305,7 @@ typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef struct float64x1x4_t { float64x1_t val[4]; } float64x1x4_t; diff --git a/lib/include/avx512erintrin.h b/lib/include/avx512erintrin.h deleted file mode 100644 index 1c5a2d2d20..0000000000 --- a/lib/include/avx512erintrin.h +++ /dev/null @@ -1,271 +0,0 @@ -/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." 
-#endif - -#ifndef __AVX512ERINTRIN_H -#define __AVX512ERINTRIN_H - -/* exp2a23 */ -#define _mm512_exp2a23_round_pd(A, R) \ - ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \ - ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), (__mmask8)(M), \ - (int)(R))) - -#define _mm512_maskz_exp2a23_round_pd(M, A, R) \ - ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm512_exp2a23_pd(A) \ - _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_exp2a23_pd(S, M, A) \ - _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_exp2a23_pd(M, A) \ - _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_exp2a23_round_ps(A, R) \ - ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (int)(R))) - -#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \ - ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), (__mmask16)(M), \ - (int)(R))) - -#define _mm512_maskz_exp2a23_round_ps(M, A, R) \ - ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (int)(R))) - -#define _mm512_exp2a23_ps(A) \ - _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_exp2a23_ps(S, M, A) \ - _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_exp2a23_ps(M, A) \ - _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) - -/* rsqrt28 */ -#define _mm512_rsqrt28_round_pd(A, R) \ - ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \ - ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), (__mmask8)(M), \ - (int)(R))) - -#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \ - ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm512_rsqrt28_pd(A) \ - _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rsqrt28_pd(S, M, A) \ - _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rsqrt28_pd(M, A) \ - _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_rsqrt28_round_ps(A, R) \ - ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (int)(R))) - -#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \ - ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), (__mmask16)(M), \ - (int)(R))) - -#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \ - ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (int)(R))) - -#define _mm512_rsqrt28_ps(A) \ - _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rsqrt28_ps(S, M, A) \ - _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rsqrt28_ps(M, A) \ - _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rsqrt28_round_ss(A, B, R) \ - ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), 
\ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \ - ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)(__m128)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \ - ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rsqrt28_ss(A, B) \ - _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rsqrt28_ss(S, M, A, B) \ - _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rsqrt28_ss(M, A, B) \ - _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rsqrt28_round_sd(A, B, R) \ - ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \ - ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)(__m128d)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \ - ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rsqrt28_sd(A, B) \ - _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rsqrt28_sd(S, M, A, B) \ - _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rsqrt28_sd(M, A, B) \ - _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -/* rcp28 */ -#define _mm512_rcp28_round_pd(A, R) \ - ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm512_mask_rcp28_round_pd(S, M, A, R) \ - ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), (__mmask8)(M), \ - (int)(R))) - -#define _mm512_maskz_rcp28_round_pd(M, A, R) \ - ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm512_rcp28_pd(A) \ - _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rcp28_pd(S, M, A) \ - _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rcp28_pd(M, A) \ - _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_rcp28_round_ps(A, R) \ - ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (int)(R))) - -#define _mm512_mask_rcp28_round_ps(S, M, A, R) \ - ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), (__mmask16)(M), \ - (int)(R))) - -#define _mm512_maskz_rcp28_round_ps(M, A, R) \ - ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (int)(R))) - -#define _mm512_rcp28_ps(A) \ - _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rcp28_ps(S, M, A) \ - _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rcp28_ps(M, A) \ - _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rcp28_round_ss(A, B, R) \ - ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - 
(__v4sf)_mm_setzero_ps(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \ - ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)(__m128)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rcp28_round_ss(M, A, B, R) \ - ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rcp28_ss(A, B) \ - _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rcp28_ss(S, M, A, B) \ - _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rcp28_ss(M, A, B) \ - _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rcp28_round_sd(A, B, R) \ - ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \ - ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)(__m128d)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rcp28_round_sd(M, A, B, R) \ - ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rcp28_sd(A, B) \ - _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rcp28_sd(S, M, A, B) \ - _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rcp28_sd(M, A, B) \ - _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#endif /* __AVX512ERINTRIN_H */ diff --git a/lib/include/avx512fp16intrin.h b/lib/include/avx512fp16intrin.h index 4123f10c39..e136aa14a1 100644 --- a/lib/include/avx512fp16intrin.h +++ b/lib/include/avx512fp16intrin.h @@ -96,8 +96,8 @@ _mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, (h5), (h4), (h3), (h2), (h1)) static __inline __m512h __DEFAULT_FN_ATTRS512 -_mm512_set1_pch(_Float16 _Complex h) { - return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, h)); +_mm512_set1_pch(_Float16 _Complex __h) { + return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, __h)); } static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_castph_ps(__m128h __a) { @@ -282,75 +282,75 @@ _mm512_zextph256_ph512(__m256h __a) { #define _mm_comi_sh(A, B, pred) \ _mm_comi_round_sh((A), (B), (pred), _MM_FROUND_CUR_DIRECTION) -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ 
int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_US, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_US, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_UQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_UQ, _MM_FROUND_CUR_DIRECTION); } diff --git a/lib/include/avx512pfintrin.h b/lib/include/avx512pfintrin.h deleted file mode 100644 index f853be021a..0000000000 --- a/lib/include/avx512pfintrin.h +++ /dev/null @@ -1,92 +0,0 @@ -/*===------------- avx512pfintrin.h - PF intrinsics ------------------------=== - * - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. 
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __AVX512PFINTRIN_H -#define __AVX512PFINTRIN_H - -#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \ - __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfdps((__mmask16)(mask), \ - (__v16si)(__m512i)(index), (void const *)(addr), \ - (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \ - __builtin_ia32_gatherpfdps((__mmask16) -1, \ - (__v16si)(__m512i)(index), (void const *)(addr), \ - (int)(scale), (int)(hint)) - -#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \ - __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \ - __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \ - __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \ - __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \ - (void *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfdps((__mmask16)(mask), \ - (__v16si)(__m512i)(index), (void *)(addr), \ - (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \ - __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \ - __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), (int)(hint)) - -#endif diff --git a/lib/include/avxintrin.h b/lib/include/avxintrin.h index 
f116d8bc3a..4983f33113 100644 --- a/lib/include/avxintrin.h +++ b/lib/include/avxintrin.h @@ -207,6 +207,8 @@ _mm256_div_ps(__m256 __a, __m256 __b) /// Compares two 256-bit vectors of [4 x double] and returns the greater /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPD instruction. @@ -226,6 +228,8 @@ _mm256_max_pd(__m256d __a, __m256d __b) /// Compares two 256-bit vectors of [8 x float] and returns the greater /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPS instruction. @@ -245,6 +249,8 @@ _mm256_max_ps(__m256 __a, __m256 __b) /// Compares two 256-bit vectors of [4 x double] and returns the lesser /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPD instruction. @@ -264,6 +270,8 @@ _mm256_min_pd(__m256d __a, __m256d __b) /// Compares two 256-bit vectors of [8 x float] and returns the lesser /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPS instruction. @@ -832,6 +840,7 @@ _mm256_permutevar_pd(__m256d __a, __m256i __c) /// Copies the values stored in a 128-bit vector of [4 x float] as /// specified by the 128-bit integer vector operand. +/// /// \headerfile /// /// This intrinsic corresponds to the VPERMILPS instruction. @@ -1574,14 +1583,6 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) (__v4df)(__m256d)(b), (int)(mask))) /* Compare */ -#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ -#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ -#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ -#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */ -#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */ -#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */ -#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */ -#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */ #define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */ #define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */ #define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */ @@ -1607,13 +1608,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) #define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */ #define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */ +/* Below intrinsic defined in emmintrin.h can be used for AVX */ /// Compares each of the corresponding double-precision values of two /// 128-bit vectors of [2 x double], using the operation specified by the /// immediate integer operand. /// -/// Returns a [2 x double] vector consisting of two doubles corresponding to -/// the two comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. 
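The NaN sentences added to the max/min documentation above deserve a concrete illustration: VMAXPD/VMINPD are not symmetric in their operands, and each lane takes the second operand whenever either input is NaN, so max(a, b) and max(b, a) can differ. A hypothetical sketch (not part of the patch), assuming an AVX-enabled build:

/* max_nan_demo.c -- build with: cc -mavx max_nan_demo.c */
#include <immintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    __m256d nan4 = _mm256_set1_pd(NAN);
    __m256d one4 = _mm256_set1_pd(1.0);
    double r[4];

    _mm256_storeu_pd(r, _mm256_max_pd(nan4, one4));
    printf("max(NaN, 1.0): %f\n", r[0]);   /* 1.0 -- taken from __b */

    _mm256_storeu_pd(r, _mm256_max_pd(one4, nan4));
    printf("max(1.0, NaN): %f\n", r[0]);   /* NaN -- also taken from __b */
    return 0;
}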
/// /// \headerfile /// @@ -1663,17 +1665,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [2 x double] containing the comparison results. -#define _mm_cmp_pd(a, b, c) \ - ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \ - (__v2df)(__m128d)(b), (c))) +/// \fn __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c) +/* Below intrinsic defined in xmmintrin.h can be used for AVX */ /// Compares each of the corresponding values of two 128-bit vectors of /// [4 x float], using the operation specified by the immediate integer /// operand. /// -/// Returns a [4 x float] vector consisting of four floats corresponding to -/// the four comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1723,17 +1724,15 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [4 x float] containing the comparison results. -#define _mm_cmp_ps(a, b, c) \ - ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \ - (__v4sf)(__m128)(b), (c))) +/// \fn __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c) /// Compares each of the corresponding double-precision values of two /// 256-bit vectors of [4 x double], using the operation specified by the /// immediate integer operand. /// -/// Returns a [4 x double] vector consisting of four doubles corresponding to -/// the four comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1791,9 +1790,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// [8 x float], using the operation specified by the immediate integer /// operand. /// -/// Returns a [8 x float] vector consisting of eight floats corresponding to -/// the eight comparison results: zero if the comparison is false, and all -/// 1's if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1847,12 +1846,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ (__v8sf)(__m256)(b), (c))) +/* Below intrinsic defined in emmintrin.h can be used for AVX */ /// Compares each of the corresponding scalar double-precision values of /// two 128-bit vectors of [2 x double], using the operation specified by the /// immediate integer operand. /// -/// If the result is true, all 64 bits of the destination vector are set; -/// otherwise they are cleared. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. 
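With _mm_cmp_pd and _mm_cmp_ps reduced to documentation stubs here (the definitions now live in emmintrin.h and xmmintrin.h, alongside the relocated _CMP_EQ_OQ through _CMP_ORD_Q predicates), the ordered/unordered rule the new comments spell out can be observed directly. A hypothetical sketch (not from the patch; predicates 0x00-0x07 need only SSE2, so no AVX flag is required for the 128-bit form):

/* cmp_pred_demo.c -- ordered vs. unordered predicates with a NaN input. */
#include <immintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    __m128d a = _mm_set1_pd(NAN);
    __m128d b = _mm_set1_pd(1.0);

    /* Ordered equal: a NaN input makes every lane false (mask bit 0). */
    int eq_oq  = _mm_movemask_pd(_mm_cmp_pd(a, b, _CMP_EQ_OQ));
    /* Unordered not-equal: a NaN input makes every lane true (mask bit 1). */
    int neq_uq = _mm_movemask_pd(_mm_cmp_pd(a, b, _CMP_NEQ_UQ));

    printf("EQ_OQ mask: %x, NEQ_UQ mask: %x\n", eq_oq, neq_uq);  /* 0 and 3 */
    return 0;
}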
/// /// \headerfile /// @@ -1902,16 +1903,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [2 x double] containing the comparison results. -#define _mm_cmp_sd(a, b, c) \ - ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \ - (__v2df)(__m128d)(b), (c))) +/// \fn __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c) +/* Below intrinsic defined in xmmintrin.h can be used for AVX */ /// Compares each of the corresponding scalar values of two 128-bit /// vectors of [4 x float], using the operation specified by the immediate /// integer operand. /// -/// If the result is true, all 32 bits of the destination vector are set; -/// otherwise they are cleared. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1961,9 +1962,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [4 x float] containing the comparison results. -#define _mm_cmp_ss(a, b, c) \ - ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \ - (__v4sf)(__m128)(b), (c))) +/// \fn __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c) /// Takes a [8 x i32] vector and returns the vector element value /// indexed by the immediate constant operand. @@ -2213,6 +2212,10 @@ _mm256_cvtpd_ps(__m256d __a) /// Converts a vector of [8 x float] into a vector of [8 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTPS2DQ instruction. @@ -2242,9 +2245,13 @@ _mm256_cvtps_pd(__m128 __a) return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); } -/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 -/// x i32], truncating the result by rounding towards zero when it is -/// inexact. +/// Converts a 256-bit vector of [4 x double] into four signed truncated +/// (rounded toward zero) 32-bit integers returned in a 128-bit vector of +/// [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -2259,9 +2266,12 @@ _mm256_cvttpd_epi32(__m256d __a) return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a); } -/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 -/// x i32]. When a conversion is inexact, the value returned is rounded -/// according to the rounding control bits in the MXCSR register. +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of +/// [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -2276,8 +2286,12 @@ _mm256_cvtpd_epi32(__m256d __a) return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a); } -/// Converts a vector of [8 x float] into a vector of [8 x i32], -/// truncating the result by rounding towards zero when it is inexact. 
+/// Converts a vector of [8 x float] into eight signed truncated (rounded +/// toward zero) 32-bit integers returned in a vector of [8 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// diff --git a/lib/include/bmiintrin.h b/lib/include/bmiintrin.h index d8e57c0cb4..78bffe68e2 100644 --- a/lib/include/bmiintrin.h +++ b/lib/include/bmiintrin.h @@ -161,8 +161,7 @@ _mm_tzcnt_64(unsigned long long __X) #undef __RELAXED_FN_ATTRS -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__BMI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi"))) @@ -610,7 +609,6 @@ __blsr_u64(unsigned long long __X) #undef __DEFAULT_FN_ATTRS -#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ - || defined(__BMI__) */ +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) */ #endif /* __BMIINTRIN_H */ diff --git a/lib/include/builtins.h b/lib/include/builtins.h index 65095861ca..1e534e632c 100644 --- a/lib/include/builtins.h +++ b/lib/include/builtins.h @@ -13,4 +13,7 @@ #ifndef __BUILTINS_H #define __BUILTINS_H +#if defined(__MVS__) && __has_include_next() +#include_next +#endif /* __MVS__ */ #endif /* __BUILTINS_H */ diff --git a/lib/include/cpuid.h b/lib/include/cpuid.h index 1ad6853a97..82d995f1b9 100644 --- a/lib/include/cpuid.h +++ b/lib/include/cpuid.h @@ -10,7 +10,7 @@ #ifndef __CPUID_H #define __CPUID_H -#if !(__x86_64__ || __i386__) +#if !defined(__x86_64__) && !defined(__i386__) #error this header is for x86 only #endif @@ -200,6 +200,9 @@ #define bit_AMXINT8 0x02000000 /* Features in %eax for leaf 7 sub-leaf 1 */ +#define bit_SHA512 0x00000001 +#define bit_SM3 0x00000002 +#define bit_SM4 0x00000004 #define bit_RAOINT 0x00000008 #define bit_AVXVNNI 0x00000010 #define bit_AVX512BF16 0x00000020 @@ -211,7 +214,12 @@ /* Features in %edx for leaf 7 sub-leaf 1 */ #define bit_AVXVNNIINT8 0x00000010 #define bit_AVXNECONVERT 0x00000020 +#define bit_AMXCOMPLEX 0x00000100 +#define bit_AVXVNNIINT16 0x00000400 #define bit_PREFETCHI 0x00004000 +#define bit_USERMSR 0x00008000 +#define bit_AVX10 0x00080000 +#define bit_APXF 0x00200000 /* Features in %eax for leaf 13 sub-leaf 1 */ #define bit_XSAVEOPT 0x00000001 @@ -244,8 +252,11 @@ #define bit_RDPRU 0x00000010 #define bit_WBNOINVD 0x00000200 +/* Features in %ebx for leaf 0x24 */ +#define bit_AVX10_256 0x00020000 +#define bit_AVX10_512 0x00040000 -#if __i386__ +#ifdef __i386__ #define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \ : "0"(__leaf)) @@ -274,7 +285,7 @@ static __inline unsigned int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig) { unsigned int __eax, __ebx, __ecx, __edx; -#if __i386__ +#ifdef __i386__ int __cpuid_supported; __asm(" pushfl\n" @@ -328,4 +339,13 @@ static __inline int __get_cpuid_count (unsigned int __leaf, return 1; } +// In some configurations, __cpuidex is defined as a builtin (primarily +// -fms-extensions) which will conflict with the __cpuidex definition below. 
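For orientation, the __cpuidex fallback that follows simply forwards to __cpuid_count, so sub-leaf queries work even outside -fms-extensions builds where the builtin exists. A hypothetical usage sketch (not part of the patch) probing the new leaf 7, sub-leaf 1 feature bits defined above:

/* cpuid_demo.c -- SHA512/SM3/SM4 are reported in %eax of leaf 7,
   sub-leaf 1, i.e. info[0] after the call. */
#include <cpuid.h>
#include <stdio.h>

int main(void) {
    int info[4] = {0};                    /* eax, ebx, ecx, edx */
    unsigned int sig;
    if (__get_cpuid_max(0, &sig) >= 7) {  /* make sure leaf 7 exists */
        __cpuidex(info, 7, 1);
        printf("SHA512=%d SM3=%d SM4=%d\n",
               !!(info[0] & bit_SHA512),
               !!(info[0] & bit_SM3),
               !!(info[0] & bit_SM4));
    }
    return 0;
}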
+#if !(__has_builtin(__cpuidex)) +static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) { + __cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2], + __cpu_info[3]); +} +#endif + #endif /* __CPUID_H */ diff --git a/lib/include/cuda_wrappers/algorithm b/lib/include/cuda_wrappers/algorithm index f14a0b00bb..3f59f28ae3 100644 --- a/lib/include/cuda_wrappers/algorithm +++ b/lib/include/cuda_wrappers/algorithm @@ -99,7 +99,7 @@ template __attribute__((enable_if(true, ""))) inline _CPP14_CONSTEXPR __host__ __device__ const __T & min(const __T &__a, const __T &__b) { - return __a < __b ? __a : __b; + return __b < __a ? __b : __a; } #pragma pop_macro("_CPP14_CONSTEXPR") diff --git a/lib/include/emmintrin.h b/lib/include/emmintrin.h index 96e3ebdecb..4dff642135 100644 --- a/lib/include/emmintrin.h +++ b/lib/include/emmintrin.h @@ -259,6 +259,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) { /// result. The upper 64 bits of the result are copied from the upper /// double-precision value of the first operand. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINSD / MINSD instruction. @@ -278,9 +280,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, } /// Performs element-by-element comparison of the two 128-bit vectors of -/// [2 x double] and returns the vector containing the lesser of each pair of +/// [2 x double] and returns a vector containing the lesser of each pair of /// values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPD / MINPD instruction. @@ -301,6 +305,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, /// result. The upper 64 bits of the result are copied from the upper /// double-precision value of the first operand. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXSD / MAXSD instruction. @@ -320,9 +326,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, } /// Performs element-by-element comparison of the two 128-bit vectors of -/// [2 x double] and returns the vector containing the greater of each pair +/// [2 x double] and returns a vector containing the greater of each pair /// of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPD / MAXPD instruction. @@ -410,8 +418,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a, } /// Compares each of the corresponding double-precision values of the -/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0x0 -/// for false, 0xFFFFFFFFFFFFFFFF for true. +/// 128-bit vectors of [2 x double] for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -429,8 +439,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, /// Compares each of the corresponding double-precision values of the /// 128-bit vectors of [2 x double] to determine if the values in the first -/// operand are less than those in the second operand. Each comparison -/// yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. 
+/// operand are less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -450,7 +462,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are less than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -470,7 +483,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -490,7 +504,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are greater than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -510,8 +525,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are ordered with respect to those in the second operand. /// -/// A pair of double-precision values are "ordered" with respect to each -/// other if neither value is a NaN. Each comparison yields 0x0 for false, +/// A pair of double-precision values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, /// 0xFFFFFFFFFFFFFFFF for true. /// /// \headerfile @@ -532,8 +547,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are unordered with respect to those in the second operand. /// -/// A pair of double-precision values are "unordered" with respect to each -/// other if one or both values are NaN. Each comparison yields 0x0 for +/// A pair of double-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for /// false, 0xFFFFFFFFFFFFFFFF for true. /// /// \headerfile @@ -555,7 +570,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are unequal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -575,7 +591,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not less than those in the second operand. 
/// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -595,7 +612,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not less than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -615,7 +633,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -635,7 +654,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not greater than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -654,7 +674,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] for equality. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -678,7 +699,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, /// the value in the first parameter is less than the corresponding value in /// the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -702,7 +724,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, /// the value in the first parameter is less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -726,7 +749,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, /// the value in the first parameter is greater than the corresponding value /// in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. 
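The ordered/unordered split these NaN notes describe is easy to observe. A minimal sketch, assuming an x86-64 target (for _mm_cvtsi128_si64); NAN comes from <math.h>:

#include <emmintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    __m128d a = _mm_set_sd(NAN); /* lower element is NaN, upper is 0.0 */
    __m128d b = _mm_set_sd(1.0);
    /* Ordered equality: a NaN operand makes the comparison false (mask 0x0). */
    unsigned long long eq = (unsigned long long)_mm_cvtsi128_si64(
        _mm_castpd_si128(_mm_cmpeq_sd(a, b)));
    /* Not-equal is unordered: a NaN operand makes it true (all-ones mask). */
    unsigned long long ne = (unsigned long long)_mm_cvtsi128_si64(
        _mm_castpd_si128(_mm_cmpneq_sd(a, b)));
    printf("cmpeq mask:  %016llx\n", eq); /* 0000000000000000 */
    printf("cmpneq mask: %016llx\n", ne); /* ffffffffffffffff */
    return 0;
}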
/// /// \headerfile /// @@ -751,7 +775,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, /// the value in the first parameter is greater than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -773,11 +798,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] to determine if -/// the value in the first parameter is "ordered" with respect to the +/// the value in the first parameter is ordered with respect to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair -/// of double-precision values are "ordered" with respect to each other if +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are ordered with respect to each other if /// neither value is a NaN. /// /// \headerfile @@ -799,11 +824,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] to determine if -/// the value in the first parameter is "unordered" with respect to the +/// the value in the first parameter is unordered with respect to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair -/// of double-precision values are "unordered" with respect to each other if +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are unordered with respect to each other if /// one or both values are NaN. /// /// \headerfile @@ -829,7 +854,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, /// the value in the first parameter is unequal to the corresponding value in /// the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -853,7 +879,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, /// the value in the first parameter is not less than the corresponding /// value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -877,7 +904,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, /// the value in the first parameter is not less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -901,7 +929,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, /// the value in the first parameter is not greater than the corresponding /// value in the second parameter. 
/// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -926,7 +955,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, /// the value in the first parameter is not greater than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -949,8 +979,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] for equality. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -962,8 +992,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b); @@ -974,8 +1003,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, /// the value in the first parameter is less than the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -987,8 +1016,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b); @@ -999,8 +1027,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, /// the value in the first parameter is less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1012,8 +1040,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. 
-/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b); @@ -1024,8 +1051,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, /// the value in the first parameter is greater than the corresponding value /// in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1037,8 +1064,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b); @@ -1049,8 +1075,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, /// the value in the first parameter is greater than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1062,8 +1088,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b); @@ -1074,8 +1099,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, /// the value in the first parameter is unequal to the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile /// @@ -1087,18 +1112,17 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 1 is returned. +/// \returns An integer containing the comparison results. 
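The comi/ucomi predicates return a plain int rather than a mask, with the NaN convention noted above: ordered predicates report 0 for a NaN operand, and the not-equal predicate reports 1. A minimal sketch under the same assumptions as the previous example:

#include <emmintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    __m128d a = _mm_set_sd(NAN);
    __m128d b = _mm_set_sd(1.0);
    printf("comieq:  %d\n", _mm_comieq_sd(a, b));  /* 0: NaN => false */
    printf("comineq: %d\n", _mm_comineq_sd(a, b)); /* 1: NaN => true  */
    /* The ucomi variants differ only in exception behavior: they raise the
       invalid exception for signaling NaNs only, not for all NaNs. */
    printf("ucomieq: %d\n", _mm_ucomieq_sd(a, b)); /* 0 */
    return 0;
}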
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b); } /// Compares the lower double-precision floating-point values in each of -/// the two 128-bit floating-point vectors of [2 x double] for equality. The -/// comparison yields 0 for false, 1 for true. +/// the two 128-bit floating-point vectors of [2 x double] for equality. /// -/// If either of the two lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1110,8 +1134,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b); @@ -1122,8 +1145,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, /// the value in the first parameter is less than the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1135,8 +1158,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b); @@ -1147,8 +1169,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, /// the value in the first parameter is less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1160,8 +1182,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b); @@ -1172,8 +1193,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, /// the value in the first parameter is greater than the corresponding value /// in the second parameter. 
/// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1185,8 +1206,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b); @@ -1197,8 +1217,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, /// the value in the first parameter is greater than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1210,8 +1230,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b); @@ -1222,8 +1241,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, /// the value in the first parameter is unequal to the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile /// @@ -1235,8 +1254,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison result. If either of the two -/// lower double-precision values is NaN, 1 is returned. +/// \returns An integer containing the comparison result. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b); @@ -1304,6 +1322,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) { /// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper /// 64 bits of the result vector are set to zero. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTPD2DQ / CVTPD2DQ instruction. 
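The "most negative integer" fallback described for these conversions is observable in the default FP environment, where the invalid exception is masked. A minimal sketch; 1e20 is simply a convenient out-of-range value:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    /* 1e20 cannot be represented in 32 bits: with the invalid exception
       masked (the default), the affected lane becomes INT_MIN. */
    __m128d v = _mm_set_pd(1e20, 2.5); /* high lane 1e20, low lane 2.5 */
    int out[4];
    _mm_storeu_si128((__m128i *)out, _mm_cvtpd_epi32(v));
    printf("cvt:  %d %d\n", out[0], out[1]); /* 2 (round-to-even), -2147483648 */
    _mm_storeu_si128((__m128i *)out, _mm_cvttpd_epi32(v));
    printf("cvtt: %d %d\n", out[0], out[1]); /* 2 (truncated), -2147483648 */
    return 0;
}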
@@ -1319,6 +1341,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) { /// Converts the low-order element of a 128-bit vector of [2 x double] /// into a 32-bit signed integer value. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. @@ -1404,12 +1430,13 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a, } /// Converts the two double-precision floating-point elements of a -/// 128-bit vector of [2 x double] into two signed 32-bit integer values, -/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. +/// 128-bit vector of [2 x double] into two signed truncated (rounded +/// toward zero) 32-bit integer values, returned in the lower 64 bits +/// of a 128-bit vector of [4 x i32]. /// -/// If the result of either conversion is inexact, the result is truncated -/// (rounded towards zero) regardless of the current MXCSR setting. The upper -/// 64 bits of the result vector are set to zero. +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1425,7 +1452,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) { } /// Converts the low-order element of a [2 x double] vector into a 32-bit -/// signed integer value, truncating the result when it is inexact. +/// signed truncated (rounded toward zero) integer value. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1444,6 +1475,10 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) { /// 128-bit vector of [2 x double] into two signed 32-bit integer values, /// returned in a 64-bit vector of [2 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the CVTPD2PI instruction. @@ -1456,11 +1491,12 @@ static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) { } /// Converts the two double-precision floating-point elements of a -/// 128-bit vector of [2 x double] into two signed 32-bit integer values, -/// returned in a 64-bit vector of [2 x i32]. +/// 128-bit vector of [2 x double] into two signed truncated (rounded toward +/// zero) 32-bit integer values, returned in a 64-bit vector of [2 x i32]. /// -/// If the result of either conversion is inexact, the result is truncated -/// (rounded towards zero) regardless of the current MXCSR setting. +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1735,7 +1771,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) { /// lower 64 bits contain the value of the parameter. The upper 64 bits are /// set to zero. 
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) { - return __extension__(__m128d){__w, 0}; + return __extension__(__m128d){__w, 0.0}; } /// Constructs a 128-bit floating-point vector of [2 x double], with each @@ -2099,9 +2135,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, } /// Adds, with saturation, the corresponding elements of two 128-bit -/// signed [16 x i8] vectors, saving each sum in the corresponding element of -/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are -/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80. +/// signed [16 x i8] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [16 x i8]. +/// +/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums +/// less than 0x80 are saturated to 0x80. /// /// \headerfile /// @@ -2119,10 +2157,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, } /// Adds, with saturation, the corresponding elements of two 128-bit -/// signed [8 x i16] vectors, saving each sum in the corresponding element of -/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF -/// are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to -/// 0x8000. +/// signed [8 x i16] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [8 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// @@ -2141,8 +2180,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, /// Adds, with saturation, the corresponding elements of two 128-bit /// unsigned [16 x i8] vectors, saving each sum in the corresponding element -/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF -/// are saturated to 0xFF. Negative sums are saturated to 0x00. +/// of a 128-bit result vector of [16 x i8]. +/// +/// Positive sums greater than 0xFF are saturated to 0xFF. Negative sums are +/// saturated to 0x00. /// /// \headerfile /// @@ -2161,8 +2202,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, /// Adds, with saturation, the corresponding elements of two 128-bit /// unsigned [8 x i16] vectors, saving each sum in the corresponding element -/// of a 128-bit result vector of [8 x i16]. Positive sums greater than -/// 0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000. +/// of a 128-bit result vector of [8 x i16]. +/// +/// Positive sums greater than 0xFFFF are saturated to 0xFFFF. Negative sums +/// are saturated to 0x0000. /// /// \headerfile /// @@ -2518,10 +2561,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, return (__m128i)((__v2du)__a - (__v2du)__b); } -/// Subtracts corresponding 8-bit signed integer values in the input and -/// returns the differences in the corresponding bytes in the destination. -/// Differences greater than 0x7F are saturated to 0x7F, and differences less -/// than 0x80 are saturated to 0x80. +/// Subtracts, with saturation, corresponding 8-bit signed integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences greater than 0x7F are saturated to 0x7F, and differences +/// less than 0x80 are saturated to 0x80. 
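Saturating and wrapping arithmetic diverge as soon as a sum leaves the representable range, which is exactly the behavior the notes above spell out. A minimal sketch contrasting the two on one byte lane:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi8(100);
    __m128i b = _mm_set1_epi8(60);
    signed char out[16];
    /* 100 + 60 = 160 overflows a signed byte; saturates to 0x7F = 127. */
    _mm_storeu_si128((__m128i *)out, _mm_adds_epi8(a, b));
    printf("adds_epi8: %d\n", out[0]); /* 127 */
    /* The non-saturating add wraps instead: 160 - 256 = -96. */
    _mm_storeu_si128((__m128i *)out, _mm_add_epi8(a, b));
    printf("add_epi8:  %d\n", out[0]); /* -96 */
    return 0;
}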
/// /// \headerfile /// @@ -2538,8 +2583,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b); } -/// Subtracts corresponding 16-bit signed integer values in the input and -/// returns the differences in the corresponding bytes in the destination. +/// Subtracts, with saturation, corresponding 16-bit signed integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// /// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less /// than 0x8000 are saturated to 0x8000. /// @@ -2558,9 +2605,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b); } -/// Subtracts corresponding 8-bit unsigned integer values in the input -/// and returns the differences in the corresponding bytes in the -/// destination. Differences less than 0x00 are saturated to 0x00. +/// Subtracts, with saturation, corresponding 8-bit unsigned integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences less than 0x00 are saturated to 0x00. /// /// \headerfile /// @@ -2577,9 +2626,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b); } -/// Subtracts corresponding 16-bit unsigned integer values in the input -/// and returns the differences in the corresponding bytes in the -/// destination. Differences less than 0x0000 are saturated to 0x0000. +/// Subtracts, with saturation, corresponding 16-bit unsigned integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences less than 0x0000 are saturated to 0x0000. /// /// \headerfile /// @@ -3008,8 +3059,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, } /// Compares each of the corresponding 8-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0x0 for false, 0xFF -/// for true. +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. /// /// \headerfile /// @@ -3026,8 +3078,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, } /// Compares each of the corresponding 16-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0x0 for false, -/// 0xFFFF for true. +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -3044,8 +3097,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, } /// Compares each of the corresponding 32-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0x0 for false, -/// 0xFFFFFFFF for true. +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. /// /// \headerfile /// @@ -3063,8 +3117,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, /// Compares each of the corresponding signed 8-bit values of the 128-bit /// integer vectors to determine if the values in the first operand are -/// greater than those in the second operand. Each comparison yields 0x0 for -/// false, 0xFF for true. +/// greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. 
/// /// \headerfile /// @@ -3086,7 +3141,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -3106,7 +3161,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. /// /// \headerfile /// @@ -3126,7 +3181,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, /// integer vectors to determine if the values in the first operand are less /// than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFF for true. +/// Each comparison returns 0x0 for false, 0xFF for true. /// /// \headerfile /// @@ -3146,7 +3201,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are less than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -3166,7 +3221,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are less than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. /// /// \headerfile /// @@ -3207,7 +3262,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a, } /// Converts the first (lower) element of a vector of [2 x double] into a -/// 64-bit signed integer value, according to the current rounding mode. +/// 64-bit signed integer value. +/// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -3222,7 +3281,11 @@ static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) { } /// Converts the first (lower) element of a vector of [2 x double] into a -/// 64-bit signed integer value, truncating the result when it is inexact. +/// 64-bit signed truncated (rounded toward zero) integer value. +/// +/// If a converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -3253,6 +3316,10 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) { /// Converts a vector of [4 x float] into a vector of [4 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTPS2DQ / CVTPS2DQ instruction. 
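The same rounding-versus-truncation distinction applies to the float conversions here: _mm_cvtps_epi32 honors the current MXCSR rounding mode, while the cvtt forms always round toward zero. A minimal sketch, assuming the default round-to-nearest-even mode:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128 v = _mm_set_ps(2.5f, -2.5f, -1.5f, 1.5f);
    int out[4];
    _mm_storeu_si128((__m128i *)out, _mm_cvtps_epi32(v));
    /* Round-to-nearest-even: 1.5 -> 2, -1.5 -> -2, -2.5 -> -2, 2.5 -> 2 */
    printf("cvt:  %d %d %d %d\n", out[0], out[1], out[2], out[3]);
    _mm_storeu_si128((__m128i *)out, _mm_cvttps_epi32(v));
    /* Truncation toward zero: 1.5 -> 1, -1.5 -> -1, -2.5 -> -2, 2.5 -> 2 */
    printf("cvtt: %d %d %d %d\n", out[0], out[1], out[2], out[3]);
    return 0;
}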
@@ -3265,8 +3332,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) { return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); } -/// Converts a vector of [4 x float] into a vector of [4 x i32], -/// truncating the result when it is inexact. +/// Converts a vector of [4 x float] into four signed truncated (rounded toward +/// zero) 32-bit integers, returned in a vector of [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -4050,26 +4121,22 @@ void _mm_mfence(void); } // extern "C" #endif -/// Converts 16-bit signed integers from both 128-bit integer vector -/// operands into 8-bit signed integers, and packs the results into the -/// destination. Positive values greater than 0x7F are saturated to 0x7F. -/// Negative values less than 0x80 are saturated to 0x80. +/// Converts, with saturation, 16-bit signed integers from both 128-bit integer +/// vector operands into 8-bit signed integers, and packs the results into +/// the destination. +/// +/// Positive values greater than 0x7F are saturated to 0x7F. Negative values +/// less than 0x80 are saturated to 0x80. /// /// \headerfile /// /// This intrinsic corresponds to the VPACKSSWB / PACKSSWB instruction. /// /// \param __a -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to a 8-bit signed integer with -/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less -/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the lower 64 bits of the result. /// \param __b -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to a 8-bit signed integer with -/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less -/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [16 x i8] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, @@ -4077,26 +4144,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b); } -/// Converts 32-bit signed integers from both 128-bit integer vector -/// operands into 16-bit signed integers, and packs the results into the -/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF. -/// Negative values less than 0x8000 are saturated to 0x8000. +/// Converts, with saturation, 32-bit signed integers from both 128-bit integer +/// vector operands into 16-bit signed integers, and packs the results into +/// the destination. +/// +/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative +/// values less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// /// This intrinsic corresponds to the VPACKSSDW / PACKSSDW instruction. /// /// \param __a -/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as -/// a signed integer and is converted to a 16-bit signed integer with -/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values -/// less than 0x8000 are saturated to 0x8000. 
The converted [4 x i16] values +/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values /// are written to the lower 64 bits of the result. /// \param __b -/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as -/// a signed integer and is converted to a 16-bit signed integer with -/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values -/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values /// are written to the higher 64 bits of the result. /// \returns A 128-bit vector of [8 x i16] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, @@ -4104,26 +4167,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b); } -/// Converts 16-bit signed integers from both 128-bit integer vector -/// operands into 8-bit unsigned integers, and packs the results into the -/// destination. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0x00 are saturated to 0x00. +/// Converts, with saturation, 16-bit signed integers from both 128-bit integer +/// vector operands into 8-bit unsigned integers, and packs the results into +/// the destination. +/// +/// Values greater than 0xFF are saturated to 0xFF. Values less than 0x00 +/// are saturated to 0x00. /// /// \headerfile /// /// This intrinsic corresponds to the VPACKUSWB / PACKUSWB instruction. /// /// \param __a -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the lower 64 bits of the result. /// \param __b -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [16 x i8] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, @@ -4742,6 +4801,78 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) { return (__m128d)__a; } +/// Compares each of the corresponding double-precision values of two +/// 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. 
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use: \n
+///    0x00: Equal (ordered, non-signaling) \n
+///    0x01: Less-than (ordered, signaling) \n
+///    0x02: Less-than-or-equal (ordered, signaling) \n
+///    0x03: Unordered (non-signaling) \n
+///    0x04: Not-equal (unordered, non-signaling) \n
+///    0x05: Not-less-than (unordered, signaling) \n
+///    0x06: Not-less-than-or-equal (unordered, signaling) \n
+///    0x07: Ordered (non-signaling) \n
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
+#define _mm_cmp_pd(a, b, c) \
+  ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+                                 (c)))
+
+/// Compares each of the corresponding scalar double-precision values of
+///    two 128-bit vectors of [2 x double], using the operation specified by the
+///    immediate integer operand.
+///
+///    Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+///    If either value in a comparison is NaN, comparisons that are ordered
+///    return false, and comparisons that are unordered return true.
+///
+/// \headerfile
+///
+/// \code
+/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the (V)CMPSD instruction.
+///
+/// \param a
+///    A 128-bit vector of [2 x double].
+/// \param b
+///    A 128-bit vector of [2 x double].
+/// \param c
+///    An immediate integer operand, with bits [4:0] specifying which comparison
+///    operation to use: \n
+///    0x00: Equal (ordered, non-signaling) \n
+///    0x01: Less-than (ordered, signaling) \n
+///    0x02: Less-than-or-equal (ordered, signaling) \n
+///    0x03: Unordered (non-signaling) \n
+///    0x04: Not-equal (unordered, non-signaling) \n
+///    0x05: Not-less-than (unordered, signaling) \n
+///    0x06: Not-less-than-or-equal (unordered, signaling) \n
+///    0x07: Ordered (non-signaling) \n
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
+#define _mm_cmp_sd(a, b, c) \
+  ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+                                 (c)))
+
 #if defined(__cplusplus)
 extern "C" {
 #endif
diff --git a/lib/include/float.h b/lib/include/float.h
index 0e73bca0a2..e5c439a9d4 100644
--- a/lib/include/float.h
+++ b/lib/include/float.h
@@ -10,6 +10,10 @@
 #ifndef __CLANG_FLOAT_H
 #define __CLANG_FLOAT_H
+#if defined(__MVS__) && __has_include_next(<float.h>)
+#include_next <float.h>
+#else
+
 /* If we're on MinGW, fall back to the system's float.h, which might have
  * additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx @@ -82,6 +86,18 @@ # undef DBL_HAS_SUBNORM # undef LDBL_HAS_SUBNORM # endif +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ + !defined(__STRICT_ANSI__) +# undef FLT_NORM_MAX +# undef DBL_NORM_MAX +# undef LDBL_NORM_MAX +#endif +#endif + +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ + !defined(__STRICT_ANSI__) +# undef INFINITY +# undef NAN #endif /* Characteristics of floating point types, C99 5.2.4.2.2 */ @@ -151,6 +167,17 @@ # define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__ #endif +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ + !defined(__STRICT_ANSI__) + /* C23 5.2.5.3.3p29-30 */ +# define INFINITY (__builtin_inff()) +# define NAN (__builtin_nanf("")) + /* C23 5.2.5.3.3p32 */ +# define FLT_NORM_MAX __FLT_NORM_MAX__ +# define DBL_NORM_MAX __DBL_NORM_MAX__ +# define LDBL_NORM_MAX __LDBL_NORM_MAX__ +#endif + #ifdef __STDC_WANT_IEC_60559_TYPES_EXT__ # define FLT16_MANT_DIG __FLT16_MANT_DIG__ # define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__ @@ -165,4 +192,5 @@ # define FLT16_TRUE_MIN __FLT16_TRUE_MIN__ #endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */ +#endif /* __MVS__ */ #endif /* __CLANG_FLOAT_H */ diff --git a/lib/include/fmaintrin.h b/lib/include/fmaintrin.h index ea832fac4f..22d1a780bb 100644 --- a/lib/include/fmaintrin.h +++ b/lib/include/fmaintrin.h @@ -60,7 +60,8 @@ _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar multiply-add of the single-precision values in the /// low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -88,7 +89,8 @@ _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar multiply-add of the double-precision values in the /// low 64 bits of 128-bit vectors of [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -156,7 +158,8 @@ _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar multiply-subtract of the single-precision values in /// the low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -184,7 +187,8 @@ _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar multiply-subtract of the double-precision values in /// the low 64 bits of 128-bit vectors of [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -252,7 +256,8 @@ _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar negated multiply-add of the single-precision values in /// the low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -280,7 +285,8 @@ _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar negated multiply-add of the double-precision values /// in the low 64 bits of 128-bit vectors of [2 x double]. 
-/// \code +/// +/// \code{.operation} /// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -348,7 +354,8 @@ _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar negated multiply-subtract of the single-precision /// values in the low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -376,7 +383,8 @@ _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar negated multiply-subtract of the double-precision /// values in the low 64 bits of 128-bit vectors of [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -404,7 +412,8 @@ _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] @@ -430,7 +439,8 @@ _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] /// \endcode @@ -454,7 +464,8 @@ _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] @@ -480,7 +491,8 @@ _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] /// \endcode @@ -664,7 +676,8 @@ _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) /// Computes a multiply with alternating add/subtract of 256-bit vectors of /// [8 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] @@ -694,7 +707,8 @@ _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) /// Computes a multiply with alternating add/subtract of 256-bit vectors of /// [4 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] /// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128] @@ -720,7 +734,8 @@ _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) /// Computes a vector multiply with alternating add/subtract of 256-bit /// vectors of [8 x float]. 
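The \code{.operation} blocks make the alternating lane pattern explicit; in use it looks like the following. A minimal sketch, assuming the translation unit is built with FMA enabled (e.g. -mfma):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128d a = _mm_set_pd(3.0, 2.0); /* high lane 3.0, low lane 2.0 */
    __m128d b = _mm_set_pd(10.0, 10.0);
    __m128d c = _mm_set_pd(1.0, 1.0);
    /* fmaddsub: the low lane gets a*b - c, the high lane gets a*b + c. */
    double out[2];
    _mm_storeu_pd(out, _mm_fmaddsub_pd(a, b, c));
    printf("%g %g\n", out[0], out[1]); /* 19 31 */
    return 0;
}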
-/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] @@ -750,7 +765,8 @@ _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) /// Computes a vector multiply with alternating add/subtract of 256-bit /// vectors of [4 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] /// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128] diff --git a/lib/include/ia32intrin.h b/lib/include/ia32intrin.h index 1b979770e1..8e65f232a0 100644 --- a/lib/include/ia32intrin.h +++ b/lib/include/ia32intrin.h @@ -26,8 +26,8 @@ #define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS #endif -/// Find the first set bit starting from the lsb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -43,8 +43,8 @@ __bsfd(int __A) { return __builtin_ctz((unsigned int)__A); } -/// Find the first set bit starting from the msb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -90,8 +90,8 @@ _bswap(int __A) { return (int)__builtin_bswap32((unsigned int)__A); } -/// Find the first set bit starting from the lsb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -108,8 +108,8 @@ _bswap(int __A) { /// \see __bsfd #define _bit_scan_forward(A) __bsfd((A)) -/// Find the first set bit starting from the msb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -127,8 +127,8 @@ _bswap(int __A) { #define _bit_scan_reverse(A) __bsrd((A)) #ifdef __x86_64__ -/// Find the first set bit starting from the lsb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -143,8 +143,8 @@ __bsfq(long long __A) { return (long long)__builtin_ctzll((unsigned long long)__A); } -/// Find the first set bit starting from the msb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if input is 0. /// /// \headerfile /// @@ -159,7 +159,7 @@ __bsrq(long long __A) { return 63 - __builtin_clzll((unsigned long long)__A); } -/// Swaps the bytes in the input. Converting little endian to big endian or +/// Swaps the bytes in the input, converting little endian to big endian or /// vice versa. /// /// \headerfile @@ -175,7 +175,7 @@ __bswapq(long long __A) { return (long long)__builtin_bswap64((unsigned long long)__A); } -/// Swaps the bytes in the input. Converting little endian to big endian or +/// Swaps the bytes in the input, converting little endian to big endian or /// vice versa. /// /// \headerfile @@ -198,7 +198,7 @@ __bswapq(long long __A) { /// \headerfile /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. 
+/// sequence of arithmetic and logic operations to calculate it. /// /// \param __A /// An unsigned 32-bit integer operand. @@ -220,7 +220,7 @@ __popcntd(unsigned int __A) /// \endcode /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param A /// An unsigned 32-bit integer operand. @@ -235,7 +235,7 @@ __popcntd(unsigned int __A) /// \headerfile /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param __A /// An unsigned 64-bit integer operand. @@ -257,7 +257,7 @@ __popcntq(unsigned long long __A) /// \endcode /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param A /// An unsigned 64-bit integer operand. @@ -268,7 +268,7 @@ __popcntq(unsigned long long __A) #endif /* __x86_64__ */ #ifdef __x86_64__ -/// Returns the program status and control \c RFLAGS register with the \c VM +/// Returns the program status-and-control \c RFLAGS register with the \c VM /// and \c RF flags cleared. /// /// \headerfile @@ -282,7 +282,7 @@ __readeflags(void) return __builtin_ia32_readeflags_u64(); } -/// Writes the specified value to the program status and control \c RFLAGS +/// Writes the specified value to the program status-and-control \c RFLAGS /// register. Reserved bits are not affected. /// /// \headerfile @@ -298,7 +298,7 @@ __writeeflags(unsigned long long __f) } #else /* !__x86_64__ */ -/// Returns the program status and control \c EFLAGS register with the \c VM +/// Returns the program status-and-control \c EFLAGS register with the \c VM /// and \c RF flags cleared. /// /// \headerfile @@ -312,7 +312,7 @@ __readeflags(void) return __builtin_ia32_readeflags_u32(); } -/// Writes the specified value to the program status and control \c EFLAGS +/// Writes the specified value to the program status-and-control \c EFLAGS /// register. Reserved bits are not affected. /// /// \headerfile @@ -328,7 +328,7 @@ __writeeflags(unsigned int __f) } #endif /* !__x86_64__ */ -/// Cast a 32-bit float value to a 32-bit unsigned integer value. +/// Casts a 32-bit float value to a 32-bit unsigned integer value. /// /// \headerfile /// @@ -337,13 +337,13 @@ __writeeflags(unsigned int __f) /// /// \param __A /// A 32-bit float value. -/// \returns a 32-bit unsigned integer containing the converted value. +/// \returns A 32-bit unsigned integer containing the converted value. static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST _castf32_u32(float __A) { return __builtin_bit_cast(unsigned int, __A); } -/// Cast a 64-bit float value to a 64-bit unsigned integer value. +/// Casts a 64-bit float value to a 64-bit unsigned integer value. /// /// \headerfile /// @@ -352,13 +352,13 @@ _castf32_u32(float __A) { /// /// \param __A /// A 64-bit float value. -/// \returns a 64-bit unsigned integer containing the converted value. +/// \returns A 64-bit unsigned integer containing the converted value. static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST _castf64_u64(double __A) { return __builtin_bit_cast(unsigned long long, __A); } -/// Cast a 32-bit unsigned integer value to a 32-bit float value. +/// Casts a 32-bit unsigned integer value to a 32-bit float value. 
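Because the cast intrinsics above reinterpret bits rather than convert values, a round trip is exact; a minimal sketch (illustrative only; assumes a Clang- or GCC-style toolchain, where ia32intrin.h is reached through x86intrin.h):

#include <stdio.h>
#include <x86intrin.h>

int main(void) {
  /* _castf32_u32 exposes the IEEE-754 bit pattern; no value conversion. */
  unsigned int bits = _castf32_u32(1.0f);
  printf("0x%08X\n", bits);           /* 0x3F800000 */
  float back = _castu32_f32(bits);    /* exact round trip */
  printf("%g\n", back);               /* 1 */
  /* __popcntd counts set bits, per the POPCNT description above. */
  printf("%d\n", __popcntd(0xF0F0u)); /* 8 */
  return 0;
}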
/// /// \headerfile /// @@ -367,13 +367,13 @@ _castf64_u64(double __A) { /// /// \param __A /// A 32-bit unsigned integer value. -/// \returns a 32-bit float value containing the converted value. +/// \returns A 32-bit float value containing the converted value. static __inline__ float __DEFAULT_FN_ATTRS_CAST _castu32_f32(unsigned int __A) { return __builtin_bit_cast(float, __A); } -/// Cast a 64-bit unsigned integer value to a 64-bit float value. +/// Casts a 64-bit unsigned integer value to a 64-bit float value. /// /// \headerfile /// @@ -382,7 +382,7 @@ _castu32_f32(unsigned int __A) { /// /// \param __A /// A 64-bit unsigned integer value. -/// \returns a 64-bit float value containing the converted value. +/// \returns A 64-bit float value containing the converted value. static __inline__ double __DEFAULT_FN_ATTRS_CAST _castu64_f64(unsigned long long __A) { return __builtin_bit_cast(double, __A); @@ -470,7 +470,7 @@ __crc32q(unsigned long long __C, unsigned long long __D) } #endif /* __x86_64__ */ -/// Reads the specified performance monitoring counter. Refer to your +/// Reads the specified performance-monitoring counter. Refer to your /// processor's documentation to determine which performance counters are /// supported. /// @@ -487,7 +487,7 @@ __rdpmc(int __A) { return __builtin_ia32_rdpmc(__A); } -/// Reads the processor's time stamp counter and the \c IA32_TSC_AUX MSR +/// Reads the processor's time-stamp counter and the \c IA32_TSC_AUX MSR /// \c (0xc0000103). /// /// \headerfile @@ -495,14 +495,14 @@ __rdpmc(int __A) { /// This intrinsic corresponds to the \c RDTSCP instruction. /// /// \param __A -/// Address of where to store the 32-bit \c IA32_TSC_AUX value. -/// \returns The 64-bit value of the time stamp counter. +/// The address of where to store the 32-bit \c IA32_TSC_AUX value. +/// \returns The 64-bit value of the time-stamp counter. static __inline__ unsigned long long __DEFAULT_FN_ATTRS __rdtscp(unsigned int *__A) { return __builtin_ia32_rdtscp(__A); } -/// Reads the processor's time stamp counter. +/// Reads the processor's time-stamp counter. /// /// \headerfile /// @@ -512,7 +512,7 @@ __rdtscp(unsigned int *__A) { /// /// This intrinsic corresponds to the \c RDTSC instruction. /// -/// \returns The 64-bit value of the time stamp counter. +/// \returns The 64-bit value of the time-stamp counter. #define _rdtsc() __rdtsc() /// Reads the specified performance monitoring counter. 
Refer to your diff --git a/lib/include/immintrin.h b/lib/include/immintrin.h index 27800f7a82..cd6cf09b90 100644 --- a/lib/include/immintrin.h +++ b/lib/include/immintrin.h @@ -16,281 +16,231 @@ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MMX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MMX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE3__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE3__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSSE3__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSSE3__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__SSE4_2__) || defined(__SSE4_1__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AES__) || defined(__PCLMUL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLFLUSHOPT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLFLUSHOPT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLWB__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLWB__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__F16C__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__F16C__) #include #endif /* No feature check desired due to internal checks */ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__BMI2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__LZCNT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__LZCNT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__POPCNT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__POPCNT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FMA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512F__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512F__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VL__) +#if !defined(__SCE__) || 
__has_feature(modules) || defined(__AVX512VL__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512BW__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BW__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512BITALG__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BITALG__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512CD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512CD__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VPOPCNTDQ__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VNNI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VNNI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512VNNI__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXVNNI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512DQ__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512DQ__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512BITALG__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512BW__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512CD__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512DQ__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512ER__) -#include -#endif - -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512IFMA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512IFMA__) && defined(__AVX512VL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXIFMA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXIFMA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VBMI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI__) #include #endif -#if !(defined(_MSC_VER) || 
defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VBMI__) && defined(__AVX512VL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VBMI2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VBMI2__) && defined(__AVX512VL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512PF__) -#include -#endif - -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512FP16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512FP16__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512BF16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BF16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512BF16__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PKU__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PKU__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__VPCLMULQDQ__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__VPCLMULQDQ__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__VAES__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__VAES__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__GFNI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__GFNI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXVNNIINT8__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT8__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXNECONVERT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXNECONVERT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SHA512__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA512__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SM3__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SM3__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SM4__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SM4__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXVNNIINT16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDPID__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPID__) /// Reads the value of the 
IA32_TSC_AUX MSR (0xc0000103). /// /// \headerfile @@ -304,8 +254,7 @@ _rdpid_u32(void) { } #endif // __RDPID__ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDRND__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDRND__) /// Returns a 16-bit hardware-generated random value. /// /// \headerfile @@ -367,8 +316,7 @@ _rdrand64_step(unsigned long long *__p) } #endif /* __RDRND__ */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FSGSBASE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FSGSBASE__) #ifdef __x86_64__ /// Reads the FS base register. /// @@ -481,8 +429,7 @@ _writegsbase_u64(unsigned long long __V) #endif #endif /* __FSGSBASE__ */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MOVBE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVBE__) /* The structs used below are to force the load/store to be unaligned. This * is accomplished with the __packed__ attribute. The __may_alias__ prevents @@ -598,139 +545,118 @@ _storebe_i64(void * __P, long long __D) { #endif #endif /* __MOVBE */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RTM__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RTM__) #include #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SHA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FXSR__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FXSR__) #include #endif /* No feature check desired due to internal MSC_VER checks */ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XSAVEOPT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEOPT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XSAVEC__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEC__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XSAVES__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVES__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SHSTK__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHSTK__) #include #endif /* Intrinsics inside adcintrin.h are available at all times. 
*/ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__ADX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__ADX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDSEED__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDSEED__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__WBNOINVD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__WBNOINVD__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLDEMOTE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLDEMOTE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__WAITPKG__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__WAITPKG__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MOVDIRI__) || defined(__MOVDIR64B__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVDIRI__) || \ + defined(__MOVDIR64B__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PCONFIG__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PCONFIG__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SGX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SGX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PTWRITE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PTWRITE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__INVPCID__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__INVPCID__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AMX_FP16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__KL__) || defined(__WIDEKL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) || \ + defined(__WIDEKL__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AMX_TILE__) || defined(__AMX_INT8__) || defined(__AMX_BF16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TILE__) || \ + defined(__AMX_INT8__) || defined(__AMX_BF16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AMX_COMPLEX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_COMPLEX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ defined(__AVX512VP2INTERSECT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__ENQCMD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SERIALIZE__) +#if !defined(__SCE__) 
|| __has_feature(modules) || defined(__SERIALIZE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__TSXLDTRK__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__TSXLDTRK__) #include #endif diff --git a/lib/include/intrin.h b/lib/include/intrin.h index 9ebaea9fee..6308c865ca 100644 --- a/lib/include/intrin.h +++ b/lib/include/intrin.h @@ -15,8 +15,10 @@ #ifndef __INTRIN_H #define __INTRIN_H +#include + /* First include the standard intrinsics. */ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) #include #endif @@ -24,7 +26,7 @@ #include #endif -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) #include #endif @@ -131,8 +133,6 @@ void __writefsqword(unsigned long, unsigned __int64); void __writefsword(unsigned long, unsigned short); void __writemsr(unsigned long, unsigned __int64); void *_AddressOfReturnAddress(void); -unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); -unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); unsigned char _bittest(long const *, long); unsigned char _bittestandcomplement(long *, long); unsigned char _bittestandreset(long *, long); @@ -151,7 +151,6 @@ long _InterlockedExchangeAdd_HLERelease(long volatile *, long); __int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64); __int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64); void _ReadBarrier(void); -void _ReadWriteBarrier(void); unsigned int _rorx_u32(unsigned int, const unsigned int); int _sarx_i32(int, unsigned int); #if __STDC_HOSTED__ @@ -167,7 +166,7 @@ unsigned __int32 xbegin(void); void _xend(void); /* These additional intrinsics are turned on in x64/amd64/x86_64 mode. 
*/ -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__arm64ec__) void __addgsbyte(unsigned long, unsigned char); void __addgsdword(unsigned long, unsigned long); void __addgsqword(unsigned long, unsigned __int64); @@ -182,12 +181,6 @@ unsigned char __readgsbyte(unsigned long); unsigned long __readgsdword(unsigned long); unsigned __int64 __readgsqword(unsigned long); unsigned short __readgsword(unsigned long); -unsigned __int64 __shiftleft128(unsigned __int64 _LowPart, - unsigned __int64 _HighPart, - unsigned char _Shift); -unsigned __int64 __shiftright128(unsigned __int64 _LowPart, - unsigned __int64 _HighPart, - unsigned char _Shift); void __stosq(unsigned __int64 *, unsigned __int64, size_t); unsigned char __vmx_on(unsigned __int64 *); unsigned char __vmx_vmclear(unsigned __int64 *); @@ -236,216 +229,15 @@ unsigned __int64 _shlx_u64(unsigned __int64, unsigned int); unsigned __int64 _shrx_u64(unsigned __int64, unsigned int); __int64 __mulh(__int64, __int64); unsigned __int64 __umulh(unsigned __int64, unsigned __int64); -__int64 _mul128(__int64, __int64, __int64*); -unsigned __int64 _umul128(unsigned __int64, - unsigned __int64, - unsigned __int64*); +__int64 _mul128(__int64, __int64, __int64 *); #endif /* __x86_64__ */ -#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) - -unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); -unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); - -#endif - -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) -__int64 _InterlockedDecrement64(__int64 volatile *_Addend); -__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value); -__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); -__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value); -__int64 _InterlockedIncrement64(__int64 volatile *_Addend); -__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask); - -#endif - -/*----------------------------------------------------------------------------*\ -|* Interlocked Exchange Add -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value); -char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value); -char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value); -short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value); -short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value); -short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value); -long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value); -long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value); -long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value); -__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value); -__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value); -__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Increment -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) 
-short _InterlockedIncrement16_acq(short volatile *_Value); -short _InterlockedIncrement16_nf(short volatile *_Value); -short _InterlockedIncrement16_rel(short volatile *_Value); -long _InterlockedIncrement_acq(long volatile *_Value); -long _InterlockedIncrement_nf(long volatile *_Value); -long _InterlockedIncrement_rel(long volatile *_Value); -__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value); -__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value); -__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Decrement -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -short _InterlockedDecrement16_acq(short volatile *_Value); -short _InterlockedDecrement16_nf(short volatile *_Value); -short _InterlockedDecrement16_rel(short volatile *_Value); -long _InterlockedDecrement_acq(long volatile *_Value); -long _InterlockedDecrement_nf(long volatile *_Value); -long _InterlockedDecrement_rel(long volatile *_Value); -__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value); -__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value); -__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked And -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedAnd8_acq(char volatile *_Value, char _Mask); -char _InterlockedAnd8_nf(char volatile *_Value, char _Mask); -char _InterlockedAnd8_rel(char volatile *_Value, char _Mask); -short _InterlockedAnd16_acq(short volatile *_Value, short _Mask); -short _InterlockedAnd16_nf(short volatile *_Value, short _Mask); -short _InterlockedAnd16_rel(short volatile *_Value, short _Mask); -long _InterlockedAnd_acq(long volatile *_Value, long _Mask); -long _InterlockedAnd_nf(long volatile *_Value, long _Mask); -long _InterlockedAnd_rel(long volatile *_Value, long _Mask); -__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask); -#endif -/*----------------------------------------------------------------------------*\ -|* Bit Counting and Testing -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -unsigned char _interlockedbittestandset_acq(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandset_nf(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandset_rel(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase, - long _BitPos); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Or -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedOr8_acq(char volatile *_Value, char _Mask); -char _InterlockedOr8_nf(char volatile *_Value, char _Mask); -char _InterlockedOr8_rel(char volatile *_Value, char _Mask); -short 
_InterlockedOr16_acq(short volatile *_Value, short _Mask); -short _InterlockedOr16_nf(short volatile *_Value, short _Mask); -short _InterlockedOr16_rel(short volatile *_Value, short _Mask); -long _InterlockedOr_acq(long volatile *_Value, long _Mask); -long _InterlockedOr_nf(long volatile *_Value, long _Mask); -long _InterlockedOr_rel(long volatile *_Value, long _Mask); -__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Xor -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedXor8_acq(char volatile *_Value, char _Mask); -char _InterlockedXor8_nf(char volatile *_Value, char _Mask); -char _InterlockedXor8_rel(char volatile *_Value, char _Mask); -short _InterlockedXor16_acq(short volatile *_Value, short _Mask); -short _InterlockedXor16_nf(short volatile *_Value, short _Mask); -short _InterlockedXor16_rel(short volatile *_Value, short _Mask); -long _InterlockedXor_acq(long volatile *_Value, long _Mask); -long _InterlockedXor_nf(long volatile *_Value, long _Mask); -long _InterlockedXor_rel(long volatile *_Value, long _Mask); -__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Exchange -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedExchange8_acq(char volatile *_Target, char _Value); -char _InterlockedExchange8_nf(char volatile *_Target, char _Value); -char _InterlockedExchange8_rel(char volatile *_Target, char _Value); -short _InterlockedExchange16_acq(short volatile *_Target, short _Value); -short _InterlockedExchange16_nf(short volatile *_Target, short _Value); -short _InterlockedExchange16_rel(short volatile *_Target, short _Value); -long _InterlockedExchange_acq(long volatile *_Target, long _Value); -long _InterlockedExchange_nf(long volatile *_Target, long _Value); -long _InterlockedExchange_rel(long volatile *_Target, long _Value); -__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value); -__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value); -__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Compare Exchange -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedCompareExchange8_acq(char volatile *_Destination, - char _Exchange, char _Comparand); -char _InterlockedCompareExchange8_nf(char volatile *_Destination, - char _Exchange, char _Comparand); -char _InterlockedCompareExchange8_rel(char volatile *_Destination, - char _Exchange, char _Comparand); -short _InterlockedCompareExchange16_acq(short volatile *_Destination, - short _Exchange, short _Comparand); -short _InterlockedCompareExchange16_nf(short volatile *_Destination, - short _Exchange, short _Comparand); -short 
_InterlockedCompareExchange16_rel(short volatile *_Destination, - short _Exchange, short _Comparand); -long _InterlockedCompareExchange_acq(long volatile *_Destination, - long _Exchange, long _Comparand); -long _InterlockedCompareExchange_nf(long volatile *_Destination, - long _Exchange, long _Comparand); -long _InterlockedCompareExchange_rel(long volatile *_Destination, - long _Exchange, long _Comparand); -__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); -__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); -__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); -#endif -#if defined(__x86_64__) || defined(__aarch64__) -unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -#endif -#if defined(__aarch64__) -unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -#endif - /*----------------------------------------------------------------------------*\ |* movs, stos \*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) + +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) { @@ -514,7 +306,7 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst, : "memory"); } #endif -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__arm64ec__) static __inline__ void __DEFAULT_FN_ATTRS __movsq( unsigned long long *__dst, unsigned long long const *__src, size_t __n) { __asm__ __volatile__("rep movsq" @@ -533,10 +325,40 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst, /*----------------------------------------------------------------------------*\ |* Misc \*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) static __inline__ void __DEFAULT_FN_ATTRS __halt(void) { __asm__ volatile("hlt"); } + +static inline unsigned char __inbyte(unsigned short port) { + unsigned char ret; + __asm__ __volatile__("inb %w1, %b0" : "=a"(ret) : "Nd"(port)); + return ret; +} + +static inline unsigned short __inword(unsigned short port) { + unsigned short ret; + __asm__ __volatile__("inw %w1, %w0" : "=a"(ret) : "Nd"(port)); + return ret; +} + +static inline unsigned long __indword(unsigned short port) { + unsigned long ret; + __asm__ __volatile__("inl %w1, %k0" : "=a"(ret) : "Nd"(port)); + return ret; +} + +static inline void __outbyte(unsigned short port, unsigned char data) { + __asm__ __volatile__("outb %b0, %w1" : : "a"(data), "Nd"(port)); +} + +static inline void __outword(unsigned short port, unsigned short data) { + __asm__ __volatile__("outw %w0, %w1" : : "a"(data), "Nd"(port)); +} + +static inline void 
__outdword(unsigned short port, unsigned long data) { + __asm__ __volatile__("outl %k0, %w1" : : "a"(data), "Nd"(port)); +} #endif #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) @@ -548,9 +370,10 @@ static __inline__ void __DEFAULT_FN_ATTRS __nop(void) { /*----------------------------------------------------------------------------*\ |* MS AArch64 specific \*----------------------------------------------------------------------------*/ -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) unsigned __int64 __getReg(int); long _InterlockedAdd(long volatile *Addend, long Value); +__int64 _InterlockedAdd64(__int64 volatile *Addend, __int64 Value); __int64 _ReadStatusReg(int); void _WriteStatusReg(int, __int64); @@ -582,18 +405,19 @@ unsigned int _CountLeadingOnes(unsigned long); unsigned int _CountLeadingOnes64(unsigned __int64); unsigned int _CountLeadingSigns(long); unsigned int _CountLeadingSigns64(__int64); -unsigned int _CountLeadingZeros(unsigned long); -unsigned int _CountLeadingZeros64(unsigned _int64); unsigned int _CountOneBits(unsigned long); unsigned int _CountOneBits64(unsigned __int64); -void __cdecl __prefetch(void *); +unsigned int __hlt(unsigned int, ...); + +void __cdecl __prefetch(const void *); + #endif /*----------------------------------------------------------------------------*\ |* Privileged intrinsics \*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS __readmsr(unsigned long __register) { // Loads the contents of a 64-bit model specific register (MSR) specified in @@ -607,7 +431,6 @@ __readmsr(unsigned long __register) { __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register)); return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax; } -#endif static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) { unsigned __LPTRINT_TYPE__ __cr3_val; @@ -623,6 +446,7 @@ static __inline__ void __DEFAULT_FN_ATTRS __writecr3(unsigned __INTPTR_TYPE__ __cr3_val) { __asm__ ("mov {%0, %%cr3|cr3, %0}" : : "r"(__cr3_val) : "memory"); } +#endif #ifdef __cplusplus } diff --git a/lib/include/intrin0.h b/lib/include/intrin0.h new file mode 100644 index 0000000000..866c889661 --- /dev/null +++ b/lib/include/intrin0.h @@ -0,0 +1,247 @@ +/* ===-------- intrin.h ---------------------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Only include this if we're compiling for the windows platform. 
*/ +#ifndef _MSC_VER +#include_next +#else + +#ifndef __INTRIN0_H +#define __INTRIN0_H + +#if defined(__x86_64__) && !defined(__arm64ec__) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); +unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); +void _ReadWriteBarrier(void); + +#if defined(__aarch64__) || defined(__arm64ec__) +unsigned int _CountLeadingZeros(unsigned long); +unsigned int _CountLeadingZeros64(unsigned __int64); +unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +#endif + +#if defined(__x86_64__) && !defined(__arm64ec__) +unsigned __int64 _umul128(unsigned __int64, unsigned __int64, + unsigned __int64 *); +unsigned __int64 __shiftleft128(unsigned __int64 _LowPart, + unsigned __int64 _HighPart, + unsigned char _Shift); +unsigned __int64 __shiftright128(unsigned __int64 _LowPart, + unsigned __int64 _HighPart, + unsigned char _Shift); +#endif + +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) +void _mm_pause(void); +#endif + +#if defined(__x86_64__) || defined(__aarch64__) +unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +#endif + +#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) +unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); +unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ + defined(__aarch64__) +__int64 _InterlockedDecrement64(__int64 volatile *_Addend); +__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value); +__int64 _InterlockedIncrement64(__int64 volatile *_Addend); +__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask); +#endif + +#if defined(__arm__) || defined(__aarch64__) || defined(__arm64ec__) +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange Add +\*----------------------------------------------------------------------------*/ +char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value); +char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value); +char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value); +short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value); +short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value); +short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value); +long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value); +long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value); +long _InterlockedExchangeAdd_rel(long volatile *_Addend, long
_Value); +__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, + __int64 _Value); +__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, + __int64 _Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Increment +\*----------------------------------------------------------------------------*/ +short _InterlockedIncrement16_acq(short volatile *_Value); +short _InterlockedIncrement16_nf(short volatile *_Value); +short _InterlockedIncrement16_rel(short volatile *_Value); +long _InterlockedIncrement_acq(long volatile *_Value); +long _InterlockedIncrement_nf(long volatile *_Value); +long _InterlockedIncrement_rel(long volatile *_Value); +__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value); +__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value); +__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Decrement +\*----------------------------------------------------------------------------*/ +short _InterlockedDecrement16_acq(short volatile *_Value); +short _InterlockedDecrement16_nf(short volatile *_Value); +short _InterlockedDecrement16_rel(short volatile *_Value); +long _InterlockedDecrement_acq(long volatile *_Value); +long _InterlockedDecrement_nf(long volatile *_Value); +long _InterlockedDecrement_rel(long volatile *_Value); +__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value); +__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value); +__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked And +\*----------------------------------------------------------------------------*/ +char _InterlockedAnd8_acq(char volatile *_Value, char _Mask); +char _InterlockedAnd8_nf(char volatile *_Value, char _Mask); +char _InterlockedAnd8_rel(char volatile *_Value, char _Mask); +short _InterlockedAnd16_acq(short volatile *_Value, short _Mask); +short _InterlockedAnd16_nf(short volatile *_Value, short _Mask); +short _InterlockedAnd16_rel(short volatile *_Value, short _Mask); +long _InterlockedAnd_acq(long volatile *_Value, long _Mask); +long _InterlockedAnd_nf(long volatile *_Value, long _Mask); +long _InterlockedAnd_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask); + +/*----------------------------------------------------------------------------*\ +|* Bit Counting and Testing +\*----------------------------------------------------------------------------*/ +unsigned char _interlockedbittestandset_acq(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandset_nf(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandset_rel(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase, + long _BitPos); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Or 
+\*----------------------------------------------------------------------------*/ +char _InterlockedOr8_acq(char volatile *_Value, char _Mask); +char _InterlockedOr8_nf(char volatile *_Value, char _Mask); +char _InterlockedOr8_rel(char volatile *_Value, char _Mask); +short _InterlockedOr16_acq(short volatile *_Value, short _Mask); +short _InterlockedOr16_nf(short volatile *_Value, short _Mask); +short _InterlockedOr16_rel(short volatile *_Value, short _Mask); +long _InterlockedOr_acq(long volatile *_Value, long _Mask); +long _InterlockedOr_nf(long volatile *_Value, long _Mask); +long _InterlockedOr_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Xor +\*----------------------------------------------------------------------------*/ +char _InterlockedXor8_acq(char volatile *_Value, char _Mask); +char _InterlockedXor8_nf(char volatile *_Value, char _Mask); +char _InterlockedXor8_rel(char volatile *_Value, char _Mask); +short _InterlockedXor16_acq(short volatile *_Value, short _Mask); +short _InterlockedXor16_nf(short volatile *_Value, short _Mask); +short _InterlockedXor16_rel(short volatile *_Value, short _Mask); +long _InterlockedXor_acq(long volatile *_Value, long _Mask); +long _InterlockedXor_nf(long volatile *_Value, long _Mask); +long _InterlockedXor_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange +\*----------------------------------------------------------------------------*/ +char _InterlockedExchange8_acq(char volatile *_Target, char _Value); +char _InterlockedExchange8_nf(char volatile *_Target, char _Value); +char _InterlockedExchange8_rel(char volatile *_Target, char _Value); +short _InterlockedExchange16_acq(short volatile *_Target, short _Value); +short _InterlockedExchange16_nf(short volatile *_Target, short _Value); +short _InterlockedExchange16_rel(short volatile *_Target, short _Value); +long _InterlockedExchange_acq(long volatile *_Target, long _Value); +long _InterlockedExchange_nf(long volatile *_Target, long _Value); +long _InterlockedExchange_rel(long volatile *_Target, long _Value); +__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Compare Exchange +\*----------------------------------------------------------------------------*/ +char _InterlockedCompareExchange8_acq(char volatile *_Destination, + char _Exchange, char _Comparand); +char _InterlockedCompareExchange8_nf(char volatile *_Destination, + char _Exchange, char _Comparand); +char _InterlockedCompareExchange8_rel(char volatile *_Destination, + char _Exchange, char _Comparand); +short _InterlockedCompareExchange16_acq(short volatile *_Destination, + short _Exchange, short _Comparand); +short 
_InterlockedCompareExchange16_nf(short volatile *_Destination, + short _Exchange, short _Comparand); +short _InterlockedCompareExchange16_rel(short volatile *_Destination, + short _Exchange, short _Comparand); +long _InterlockedCompareExchange_acq(long volatile *_Destination, + long _Exchange, long _Comparand); +long _InterlockedCompareExchange_nf(long volatile *_Destination, long _Exchange, + long _Comparand); +long _InterlockedCompareExchange_rel(long volatile *_Destination, + long _Exchange, long _Comparand); +__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination, + __int64 _Exchange, + __int64 _Comparand); +__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand); +__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination, + __int64 _Exchange, + __int64 _Comparand); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __INTRIN0_H */ +#endif /* _MSC_VER */ diff --git a/lib/include/inttypes.h b/lib/include/inttypes.h index 1c894c4aca..5150d22f8b 100644 --- a/lib/include/inttypes.h +++ b/lib/include/inttypes.h @@ -13,6 +13,9 @@ #if !defined(_AIX) || !defined(_STD_TYPES_T) #define __CLANG_INTTYPES_H #endif +#if defined(__MVS__) && __has_include_next() +#include_next +#else #if defined(_MSC_VER) && _MSC_VER < 1800 #error MSVC does not have inttypes.h prior to Visual Studio 2013 @@ -94,4 +97,5 @@ #define SCNxFAST32 "x" #endif +#endif /* __MVS__ */ #endif /* __CLANG_INTTYPES_H */ diff --git a/lib/include/iso646.h b/lib/include/iso646.h index e0a20c6f18..b53fcd9b4e 100644 --- a/lib/include/iso646.h +++ b/lib/include/iso646.h @@ -9,6 +9,9 @@ #ifndef __ISO646_H #define __ISO646_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else #ifndef __cplusplus #define and && @@ -24,4 +27,5 @@ #define xor_eq ^= #endif +#endif /* __MVS__ */ #endif /* __ISO646_H */ diff --git a/lib/include/keylockerintrin.h b/lib/include/keylockerintrin.h index 1994ac4207..f76e91b4d4 100644 --- a/lib/include/keylockerintrin.h +++ b/lib/include/keylockerintrin.h @@ -28,8 +28,7 @@ #ifndef _KEYLOCKERINTRIN_H #define _KEYLOCKERINTRIN_H -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__KL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS \ @@ -327,11 +326,9 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { #undef __DEFAULT_FN_ATTRS -#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ - || defined(__KL__) */ +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__KL__) */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__WIDEKL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) /* Define the default attributes for the functions in this file. 
*/ #define __DEFAULT_FN_ATTRS \ @@ -524,7 +521,7 @@ _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* #undef __DEFAULT_FN_ATTRS -#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ - || defined(__WIDEKL__) */ +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \ + */ #endif /* _KEYLOCKERINTRIN_H */ diff --git a/lib/include/limits.h b/lib/include/limits.h index 15e6bbe0ab..56dffe5684 100644 --- a/lib/include/limits.h +++ b/lib/include/limits.h @@ -9,6 +9,10 @@ #ifndef __CLANG_LIMITS_H #define __CLANG_LIMITS_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* The system's limits.h may, in turn, try to #include_next GCC's limits.h. Avert this #include_next madness. */ #if defined __GNUC__ && !defined _GCC_LIMITS_H_ @@ -122,4 +126,5 @@ #define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL) #endif +#endif /* __MVS__ */ #endif /* __CLANG_LIMITS_H */ diff --git a/lib/include/llvm_libc_wrappers/assert.h b/lib/include/llvm_libc_wrappers/assert.h index de650ca844..610ed96a45 100644 --- a/lib/include/llvm_libc_wrappers/assert.h +++ b/lib/include/llvm_libc_wrappers/assert.h @@ -1,4 +1,4 @@ -//===-- Wrapper for C standard assert.h declarations on the GPU ------------===// +//===-- Wrapper for C standard assert.h declarations on the GPU -*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/lib/include/mm3dnow.h b/lib/include/mm3dnow.h index 22ab13aa33..afffba3a9c 100644 --- a/lib/include/mm3dnow.h +++ b/lib/include/mm3dnow.h @@ -7,151 +7,16 @@ *===-----------------------------------------------------------------------=== */ +// 3dNow intrinsics are no longer supported. + #ifndef _MM3DNOW_H_INCLUDED #define _MM3DNOW_H_INCLUDED +#ifndef _CLANG_DISABLE_CRT_DEPRECATION_WARNINGS +#warning "The <mm3dnow.h> header is deprecated, and 3dNow! intrinsics are unsupported. For other intrinsics, include <x86intrin.h>, instead." +#endif + #include #include -typedef float __v2sf __attribute__((__vector_size__(8))); - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow"), __min_vector_width__(64))) - -static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("3dnow"))) -_m_femms(void) { - __builtin_ia32_femms(); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pavgusb(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pf2id(__m64 __m) { - return (__m64)__builtin_ia32_pf2id((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfacc(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfadd(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfcmpeq(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfcmpge(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfcmpgt(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfmax(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfmin(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfmul(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrcp(__m64 __m) { - return (__m64)__builtin_ia32_pfrcp((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrcpit1(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrcpit2(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrsqrt(__m64 __m) { - return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrsqrtit1(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfsub(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfsubr(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pi2fd(__m64 __m) { - return (__m64)__builtin_ia32_pi2fd((__v2si)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pmulhrw(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2); -} - -/* Handle the 3dnowa instructions here. 
*/ -#undef __DEFAULT_FN_ATTRS -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa"), __min_vector_width__(64))) - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pf2iw(__m64 __m) { - return (__m64)__builtin_ia32_pf2iw((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfnacc(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfpnacc(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pi2fw(__m64 __m) { - return (__m64)__builtin_ia32_pi2fw((__v2si)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pswapdsf(__m64 __m) { - return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pswapdsi(__m64 __m) { - return (__m64)__builtin_ia32_pswapdsi((__v2si)__m); -} - -#undef __DEFAULT_FN_ATTRS - #endif diff --git a/lib/include/mmintrin.h b/lib/include/mmintrin.h index 08849f0107..4e154e2d85 100644 --- a/lib/include/mmintrin.h +++ b/lib/include/mmintrin.h @@ -105,28 +105,23 @@ _mm_cvtm64_si64(__m64 __m) return (long long)__m; } -/// Converts 16-bit signed integers from both 64-bit integer vector -/// parameters of [4 x i16] into 8-bit signed integer values, and constructs -/// a 64-bit integer vector of [8 x i8] as the result. Positive values -/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80 -/// are saturated to 0x80. +/// Converts, with saturation, 16-bit signed integers from both 64-bit integer +/// vector parameters of [4 x i16] into 8-bit signed integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. +/// +/// Positive values greater than 0x7F are saturated to 0x7F. Negative values +/// less than 0x80 are saturated to 0x80. /// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the PACKSSWB instruction. /// /// \param __m1 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit signed integer with -/// saturation. Positive values greater than 0x7F are saturated to 0x7F. -/// Negative values less than 0x80 are saturated to 0x80. The converted -/// [4 x i8] values are written to the lower 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the lower 32 bits of the result. /// \param __m2 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit signed integer with -/// saturation. Positive values greater than 0x7F are saturated to 0x7F. -/// Negative values less than 0x80 are saturated to 0x80. The converted -/// [4 x i8] values are written to the upper 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. static __inline__ __m64 __DEFAULT_FN_ATTRS @@ -135,28 +130,23 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); } -/// Converts 32-bit signed integers from both 64-bit integer vector -/// parameters of [2 x i32] into 16-bit signed integer values, and constructs -/// a 64-bit integer vector of [4 x i16] as the result. Positive values -/// greater than 0x7FFF are saturated to 0x7FFF.
Negative values less than -/// 0x8000 are saturated to 0x8000. +/// Converts, with saturation, 32-bit signed integers from both 64-bit integer +/// vector parameters of [2 x i32] into 16-bit signed integer values, and +/// constructs a 64-bit integer vector of [4 x i16] as the result. +/// +/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative +/// values less than 0x8000 are saturated to 0x8000. /// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the PACKSSDW instruction. /// /// \param __m1 -/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a -/// 32-bit signed integer and is converted to a 16-bit signed integer with -/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. -/// Negative values less than 0x8000 are saturated to 0x8000. The converted -/// [2 x i16] values are written to the lower 32 bits of the result. +/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are +/// written to the lower 32 bits of the result. /// \param __m2 -/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a -/// 32-bit signed integer and is converted to a 16-bit signed integer with -/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. -/// Negative values less than 0x8000 are saturated to 0x8000. The converted -/// [2 x i16] values are written to the upper 32 bits of the result. +/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are +/// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [4 x i16] containing the converted /// values. static __inline__ __m64 __DEFAULT_FN_ATTRS @@ -165,28 +155,23 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); } -/// Converts 16-bit signed integers from both 64-bit integer vector -/// parameters of [4 x i16] into 8-bit unsigned integer values, and -/// constructs a 64-bit integer vector of [8 x i8] as the result. Values -/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated -/// to 0. +/// Converts, with saturation, 16-bit signed integers from both 64-bit integer +/// vector parameters of [4 x i16] into 8-bit unsigned integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. +/// +/// Values greater than 0xFF are saturated to 0xFF. Values less than 0 are +/// saturated to 0. /// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the PACKUSWB instruction. /// /// \param __m1 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0 are saturated to 0. The converted [4 x i8] values are written to -/// the lower 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the lower 32 bits of the result. /// \param __m2 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0 are saturated to 0. The converted [4 x i8] values are written to -/// the upper 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the upper 32 bits of the result.
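The reworded pack documentation above is easier to verify against a concrete call. A minimal sketch (not part of the diff; assumes an x86 target with MMX available; demo_pack is a hypothetical name):

    #include <mmintrin.h>

    /* Narrow four 16-bit lanes per operand to 8-bit lanes with signed
       saturation: 300 clamps to 127 (0x7F) and -300 clamps to -128 (0x80).
       Callers should issue _mm_empty() before running x87 code afterwards. */
    static __m64 demo_pack(void) {
      __m64 lo = _mm_set_pi16(300, -300, 1, -1); /* -> lower 32 bits of result */
      __m64 hi = _mm_setzero_si64();             /* -> upper 32 bits of result */
      return _mm_packs_pi16(lo, hi);
    }

The same convention (first operand fills the low half, second the high half) holds for _mm_packs_pi32, _mm_packs_pu16, and the SSE4.1 _mm_packus_epi32 rewrite further down.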
/// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. static __inline__ __m64 __DEFAULT_FN_ATTRS @@ -400,11 +385,13 @@ _mm_add_pi32(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); } -/// Adds each 8-bit signed integer element of the first 64-bit integer -/// vector of [8 x i8] to the corresponding 8-bit signed integer element of -/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than -/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to -/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8]. +/// Adds, with saturation, each 8-bit signed integer element of the first +/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit signed +/// integer element of the second 64-bit integer vector of [8 x i8]. +/// +/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums +/// less than 0x80 are saturated to 0x80. The results are packed into a +/// 64-bit integer vector of [8 x i8]. /// /// \headerfile <x86intrin.h> /// @@ -422,12 +409,13 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); } -/// Adds each 16-bit signed integer element of the first 64-bit integer -/// vector of [4 x i16] to the corresponding 16-bit signed integer element of -/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than -/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are -/// saturated to 0x8000. The results are packed into a 64-bit integer vector -/// of [4 x i16]. +/// Adds, with saturation, each 16-bit signed integer element of the first +/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit signed +/// integer element of the second 64-bit integer vector of [4 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. The results are packed into a +/// 64-bit integer vector of [4 x i16]. /// /// \headerfile <x86intrin.h> /// @@ -445,11 +433,12 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); } -/// Adds each 8-bit unsigned integer element of the first 64-bit integer -/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of -/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are -/// saturated to 0xFF. The results are packed into a 64-bit integer vector of -/// [8 x i8]. +/// Adds, with saturation, each 8-bit unsigned integer element of the first +/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit unsigned +/// integer element of the second 64-bit integer vector of [8 x i8]. +/// +/// Sums greater than 0xFF are saturated to 0xFF. The results are packed +/// into a 64-bit integer vector of [8 x i8]. /// /// \headerfile <x86intrin.h> /// @@ -467,11 +456,12 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); } -/// Adds each 16-bit unsigned integer element of the first 64-bit integer -/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element -/// of the second 64-bit integer vector of [4 x i16]. Sums greater than -/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit -/// integer vector of [4 x i16]. +/// Adds, with saturation, each 16-bit unsigned integer element of the first +/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit unsigned +/// integer element of the second 64-bit integer vector of [4 x i16].
+/// +/// Sums greater than 0xFFFF are saturated to 0xFFFF. The results are packed +/// into a 64-bit integer vector of [4 x i16]. /// /// \headerfile <x86intrin.h> /// @@ -552,12 +542,13 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); } -/// Subtracts each 8-bit signed integer element of the second 64-bit -/// integer vector of [8 x i8] from the corresponding 8-bit signed integer -/// element of the first 64-bit integer vector of [8 x i8]. Positive results -/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80 -/// are saturated to 0x80. The results are packed into a 64-bit integer -/// vector of [8 x i8]. +/// Subtracts, with saturation, each 8-bit signed integer element of the second +/// 64-bit integer vector of [8 x i8] from the corresponding 8-bit signed +/// integer element of the first 64-bit integer vector of [8 x i8]. +/// +/// Positive results greater than 0x7F are saturated to 0x7F. Negative +/// results less than 0x80 are saturated to 0x80. The results are packed +/// into a 64-bit integer vector of [8 x i8]. /// /// \headerfile <x86intrin.h> /// @@ -575,12 +566,13 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); } -/// Subtracts each 16-bit signed integer element of the second 64-bit -/// integer vector of [4 x i16] from the corresponding 16-bit signed integer -/// element of the first 64-bit integer vector of [4 x i16]. Positive results -/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than -/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit -/// integer vector of [4 x i16]. +/// Subtracts, with saturation, each 16-bit signed integer element of the +/// second 64-bit integer vector of [4 x i16] from the corresponding 16-bit +/// signed integer element of the first 64-bit integer vector of [4 x i16]. +/// +/// Positive results greater than 0x7FFF are saturated to 0x7FFF. Negative +/// results less than 0x8000 are saturated to 0x8000. The results are packed +/// into a 64-bit integer vector of [4 x i16]. /// /// \headerfile <x86intrin.h> /// @@ -1149,7 +1141,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2) /// [8 x i8] to determine if the element of the first vector is equal to the /// corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFF for true. +/// Each comparison returns 0 for false, 0xFF for true. /// /// \headerfile <x86intrin.h> /// @@ -1171,7 +1163,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) /// [4 x i16] to determine if the element of the first vector is equal to the /// corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFF for true. +/// Each comparison returns 0 for false, 0xFFFF for true. /// /// \headerfile <x86intrin.h> /// @@ -1193,7 +1185,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) /// [2 x i32] to determine if the element of the first vector is equal to the /// corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0 for false, 0xFFFFFFFF for true. /// /// \headerfile <x86intrin.h> /// @@ -1215,7 +1207,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) /// [8 x i8] to determine if the element of the first vector is greater than /// the corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFF for true. +/// Each comparison returns 0 for false, 0xFF for true.
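A compact sketch of the saturating-arithmetic and comparison-mask semantics that the rewritten mmintrin.h comments describe (illustrative only; x86 with MMX assumed, the demo_* names are hypothetical):

    #include <mmintrin.h>

    static __m64 demo_adds(void) {
      __m64 a = _mm_set1_pi8(100);
      __m64 b = _mm_set1_pi8(100);
      return _mm_adds_pi8(a, b); /* each lane: 100 + 100 = 200 saturates to 127 (0x7F) */
    }

    static __m64 demo_cmpgt(void) {
      __m64 a = _mm_set1_pi8(2);
      __m64 b = _mm_set1_pi8(1);
      return _mm_cmpgt_pi8(a, b); /* each lane: 0xFF, since 2 > 1 */
    }

Each comparison lane is an independent all-ones/all-zeros mask, which is what the wording change from "The comparison yields" to "Each comparison returns" makes explicit.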
/// /// \headerfile <x86intrin.h> /// @@ -1237,7 +1229,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) /// [4 x i16] to determine if the element of the first vector is greater than /// the corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFF for true. +/// Each comparison returns 0 for false, 0xFFFF for true. /// /// \headerfile <x86intrin.h> /// @@ -1259,7 +1251,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) /// [2 x i32] to determine if the element of the first vector is greater than /// the corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0 for false, 0xFFFFFFFF for true. /// /// \headerfile <x86intrin.h> /// diff --git a/lib/include/module.modulemap b/lib/include/module.modulemap index 56a13f69bc..9ffc249c8d 100644 --- a/lib/include/module.modulemap +++ b/lib/include/module.modulemap @@ -44,7 +44,6 @@ module _Builtin_intrinsics [system] [extern_c] { textual header "avxintrin.h" textual header "avx2intrin.h" textual header "avx512fintrin.h" - textual header "avx512erintrin.h" textual header "fmaintrin.h" header "x86intrin.h" @@ -203,6 +202,11 @@ module _Builtin_stdarg [system] { export * } + explicit module header_macro { + header "__stdarg_header_macro.h" + export * + } + explicit module va_arg { header "__stdarg_va_arg.h" export * @@ -232,6 +236,10 @@ module _Builtin_stdbool [system] { module _Builtin_stddef [system] { textual header "stddef.h" + explicit module header_macro { + header "__stddef_header_macro.h" + export * + } // __stddef_max_align_t.h is always in this module, even if // -fbuiltin-headers-in-system-modules is passed. explicit module max_align_t { @@ -315,3 +323,8 @@ module opencl_c { header "opencl-c.h" header "opencl-c-base.h" } + +module ptrauth { + header "ptrauth.h" + export * +} diff --git a/lib/include/opencl-c-base.h b/lib/include/opencl-c-base.h index 2494f6213f..786678b9d8 100644 --- a/lib/include/opencl-c-base.h +++ b/lib/include/opencl-c-base.h @@ -46,6 +46,10 @@ #define __opencl_c_ext_fp32_global_atomic_min_max 1 #define __opencl_c_ext_fp32_local_atomic_min_max 1 #define __opencl_c_ext_image_raw10_raw12 1 +#define cl_khr_kernel_clock 1 +#define __opencl_c_kernel_clock_scope_device 1 +#define __opencl_c_kernel_clock_scope_work_group 1 +#define __opencl_c_kernel_clock_scope_sub_group 1 #endif // defined(__SPIR__) || defined(__SPIRV__) #endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200) diff --git a/lib/include/opencl-c.h b/lib/include/opencl-c.h index 288bb18bc6..20719b74b6 100644 --- a/lib/include/opencl-c.h +++ b/lib/include/opencl-c.h @@ -17314,6 +17314,21 @@ half __ovld __conv sub_group_clustered_rotate(half, int, uint); #endif // cl_khr_fp16 #endif // cl_khr_subgroup_rotate +#if defined(cl_khr_kernel_clock) +#if defined(__opencl_c_kernel_clock_scope_device) +ulong __ovld clock_read_device(); +uint2 __ovld clock_read_hilo_device(); +#endif // __opencl_c_kernel_clock_scope_device +#if defined(__opencl_c_kernel_clock_scope_work_group) +ulong __ovld clock_read_work_group(); +uint2 __ovld clock_read_hilo_work_group(); +#endif // __opencl_c_kernel_clock_scope_work_group +#if defined(__opencl_c_kernel_clock_scope_sub_group) +ulong __ovld clock_read_sub_group(); +uint2 __ovld clock_read_hilo_sub_group(); +#endif // __opencl_c_kernel_clock_scope_sub_group +#endif // cl_khr_kernel_clock + #if defined(cl_intel_subgroups) // Intel-Specific Sub Group Functions float __ovld __conv intel_sub_group_shuffle( float , uint ); diff --git a/lib/include/prfchwintrin.h
b/lib/include/prfchwintrin.h index d2f91aa012..eaea5f3cf8 100644 --- a/lib/include/prfchwintrin.h +++ b/lib/include/prfchwintrin.h @@ -8,16 +8,17 @@ */ #if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED) -#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead." +#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> instead." #endif #ifndef __PRFCHWINTRIN_H #define __PRFCHWINTRIN_H /// Loads a memory sequence containing the specified memory address into -/// all data cache levels. The cache-coherency state is set to exclusive. -/// Data can be read from and written to the cache line without additional -/// delay. +/// all data cache levels. +/// +/// The cache-coherency state is set to exclusive. Data can be read from +/// and written to the cache line without additional delay. /// /// \headerfile <x86intrin.h> /// @@ -32,10 +33,11 @@ _m_prefetch(void *__P) } /// Loads a memory sequence containing the specified memory address into -/// the L1 data cache and sets the cache-coherency to modified. This -/// provides a hint to the processor that the cache line will be modified. -/// It is intended for use when the cache line will be written to shortly -/// after the prefetch is performed. +/// the L1 data cache and sets the cache-coherency state to modified. +/// +/// This provides a hint to the processor that the cache line will be +/// modified. It is intended for use when the cache line will be written to +/// shortly after the prefetch is performed. /// /// Note that the effect of this intrinsic is dependent on the processor /// implementation. diff --git a/lib/include/ptrauth.h b/lib/include/ptrauth.h new file mode 100644 index 0000000000..154b599862 --- /dev/null +++ b/lib/include/ptrauth.h @@ -0,0 +1,330 @@ +/*===---- ptrauth.h - Pointer authentication -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __PTRAUTH_H +#define __PTRAUTH_H + +typedef enum { + ptrauth_key_asia = 0, + ptrauth_key_asib = 1, + ptrauth_key_asda = 2, + ptrauth_key_asdb = 3, + + /* A process-independent key which can be used to sign code pointers. */ + ptrauth_key_process_independent_code = ptrauth_key_asia, + + /* A process-specific key which can be used to sign code pointers. */ + ptrauth_key_process_dependent_code = ptrauth_key_asib, + + /* A process-independent key which can be used to sign data pointers. */ + ptrauth_key_process_independent_data = ptrauth_key_asda, + + /* A process-specific key which can be used to sign data pointers. */ + ptrauth_key_process_dependent_data = ptrauth_key_asdb, + + /* The key used to sign return addresses on the stack. + The extra data is based on the storage address of the return address. + On AArch64, that is always the storage address of the return address + 8 + (or, in other words, the value of the stack pointer on function entry) */ + ptrauth_key_return_address = ptrauth_key_process_dependent_code, + + /* The key used to sign C function pointers. + The extra data is always 0. */ + ptrauth_key_function_pointer = ptrauth_key_process_independent_code, + + /* The key used to sign C++ v-table pointers. + The extra data is always 0. */ + ptrauth_key_cxx_vtable_pointer = ptrauth_key_process_independent_data, + + /* Other pointers signed under the ABI use private ABI rules.
*/ + +} ptrauth_key; + +/* An integer type of the appropriate size for a discriminator argument. */ +typedef __UINTPTR_TYPE__ ptrauth_extra_data_t; + +/* An integer type of the appropriate size for a generic signature. */ +typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t; + +/* A signed pointer value embeds the original pointer together with + a signature that attests to the validity of that pointer. Because + this signature must use only "spare" bits of the pointer, a + signature's validity is probabilistic in practice: it is unlikely + but still plausible that an invalidly-derived signature will + somehow equal the correct signature and therefore successfully + authenticate. Nonetheless, this scheme provides a strong degree + of protection against certain kinds of attacks. */ + +/* Authenticating a pointer that was not signed with the given key + and extra-data value will (likely) fail by trapping. */ + +/* The null function pointer is always the all-zero bit pattern. + Signing an all-zero bit pattern will embed a (likely) non-zero + signature in the result, and so the result will not seem to be + a null function pointer. Authenticating this value will yield + a null function pointer back. However, authenticating an + all-zero bit pattern will probably fail, because the + authentication will expect a (likely) non-zero signature + embedded in the value. + + Because of this, if a pointer may validly be null, you should + check for null before attempting to authenticate it with one + of these intrinsics. This is not necessary when using the + __ptrauth qualifier; the compiler will perform this check + automatically. */ + +#if __has_feature(ptrauth_intrinsics) + +/* Strip the signature from a value without authenticating it. + + If the value is a function pointer, the result will not be a + legal function pointer because of the missing signature, and + attempting to call it will result in an authentication failure. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The result will have the same type as the original value. */ +#define ptrauth_strip(__value, __key) __builtin_ptrauth_strip(__value, __key) + +/* Blend a constant discriminator into the given pointer-like value + to form a new discriminator. Not all bits of the inputs are + guaranteed to contribute to the result. + + On arm64e, the integer must fall within the range of a uint16_t; + other bits may be ignored. + + For the purposes of ptrauth_sign_constant, the result of calling + this function is considered a constant expression if the arguments + are constant. Some restrictions may be imposed on the pointer. + + The first argument must be an expression of pointer type. + The second argument must be an expression of integer type. + The result will have type uintptr_t. */ +#define ptrauth_blend_discriminator(__pointer, __integer) \ + __builtin_ptrauth_blend_discriminator(__pointer, __integer) + +/* Return a signed pointer for a constant address in a manner which guarantees + a non-attackable sequence. + + The value must be a constant expression of pointer type which evaluates to + a non-null pointer. + The key must be a constant expression of type ptrauth_key. + The extra data must be a constant expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This can be used in constant expressions.
*/ +#define ptrauth_sign_constant(__value, __key, __data) \ + __builtin_ptrauth_sign_constant(__value, __key, __data) + +/* Add a signature to the given pointer value using a specific key, + using the given extra data as a salt to the signing process. + + This operation does not authenticate the original value and is + therefore potentially insecure if an attacker could possibly + control that value. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. */ +#define ptrauth_sign_unauthenticated(__value, __key, __data) \ + __builtin_ptrauth_sign_unauthenticated(__value, __key, __data) + +/* Authenticate a pointer using one scheme and resign it using another. + + If the result is subsequently authenticated using the new scheme, that + authentication is guaranteed to fail if and only if the initial + authentication failed. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation is guaranteed to not leave the intermediate value + available for attack before it is re-signed. + + Do not pass a null pointer to this function. A null pointer + will not successfully authenticate. + + This operation traps if the authentication fails. */ +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \ + __new_data) \ + __builtin_ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \ + __new_data) + +/* Authenticate a pointer using one scheme and resign it as a C + function pointer. + + If the result is subsequently authenticated using the new scheme, that + authentication is guaranteed to fail if and only if the initial + authentication failed. + + The value must be an expression of function pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation is guaranteed to not leave the intermediate value + available for attack before it is re-signed. Additionally, if this + expression is used syntactically as the function expression in a + call, only a single authentication will be performed. */ +#define ptrauth_auth_function(__value, __old_key, __old_data) \ + ptrauth_auth_and_resign(__value, __old_key, __old_data, \ + ptrauth_key_function_pointer, 0) + +/* Authenticate a data pointer. + + The value must be an expression of non-function pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation traps if the authentication fails. */ +#define ptrauth_auth_data(__value, __old_key, __old_data) \ + __builtin_ptrauth_auth(__value, __old_key, __old_data) + +/* Compute a constant discriminator from the given string. + + The argument must be a string literal of char character type. The result + has type ptrauth_extra_data_t. 
+ + The result value is never zero and always within range for both the + __ptrauth qualifier and ptrauth_blend_discriminator. + + This can be used in constant expressions. +*/ +#define ptrauth_string_discriminator(__string) \ + __builtin_ptrauth_string_discriminator(__string) + +/* Compute a constant discriminator from the given type. + + The result can be used as the second argument to + ptrauth_blend_discriminator or the third argument to the + __ptrauth qualifier. It has type size_t. + + If the type is a C++ member function pointer type, the result is + the discriminator used to sign member function pointers of that + type. If the type is a function, function pointer, or function + reference type, the result is the discriminator used to sign + functions of that type. It is ill-formed to use this macro with any + other type. + + A call to this function is an integer constant expression. */ +#define ptrauth_type_discriminator(__type) \ + __builtin_ptrauth_type_discriminator(__type) + +/* Compute a signature for the given pair of pointer-sized values. + The order of the arguments is significant. + + Like a pointer signature, the resulting signature depends on + private key data and therefore should not be reliably reproducible + by attackers. That means that this can be used to validate the + integrity of arbitrary data by storing a signature for that data + alongside it, then checking that the signature is still valid later. + Data which exceeds two pointers in size can be signed by either + computing a tree of generic signatures or just signing an ordinary + cryptographic hash of the data. + + The result has type ptrauth_generic_signature_t. However, it may + not have as many bits of entropy as that type's width would suggest; + some implementations are known to compute a compressed signature as + if the arguments were a pointer and a discriminator. + + The arguments must be either pointers or integers; if integers, they + will be coerced to uintptr_t. */ +#define ptrauth_sign_generic_data(__value, __data) \ + __builtin_ptrauth_sign_generic_data(__value, __data) + +/* C++ vtable pointer signing class attribute */ +#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \ extra_discrimination...)
\ + [[clang::ptrauth_vtable_pointer(key, address_discrimination, \ + extra_discrimination)]] + +#else + +#define ptrauth_strip(__value, __key) \ + ({ \ + (void)__key; \ + __value; \ + }) + +#define ptrauth_blend_discriminator(__pointer, __integer) \ + ({ \ + (void)__pointer; \ + (void)__integer; \ + ((ptrauth_extra_data_t)0); \ + }) + +#define ptrauth_sign_constant(__value, __key, __data) \ + ({ \ + (void)__key; \ + (void)__data; \ + __value; \ + }) + +#define ptrauth_sign_unauthenticated(__value, __key, __data) \ + ({ \ + (void)__key; \ + (void)__data; \ + __value; \ + }) + +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \ + __new_data) \ + ({ \ + (void)__old_key; \ + (void)__old_data; \ + (void)__new_key; \ + (void)__new_data; \ + __value; \ + }) + +#define ptrauth_auth_function(__value, __old_key, __old_data) \ + ({ \ + (void)__old_key; \ + (void)__old_data; \ + __value; \ + }) + +#define ptrauth_auth_data(__value, __old_key, __old_data) \ + ({ \ + (void)__old_key; \ + (void)__old_data; \ + __value; \ + }) + +#define ptrauth_string_discriminator(__string) \ + ({ \ + (void)__string; \ + ((ptrauth_extra_data_t)0); \ + }) + +#define ptrauth_type_discriminator(__type) ((ptrauth_extra_data_t)0) + +#define ptrauth_sign_generic_data(__value, __data) \ + ({ \ + (void)__value; \ + (void)__data; \ + ((ptrauth_generic_signature_t)0); \ + }) + + +#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \ + extra_discrimination...) + +#endif /* __has_feature(ptrauth_intrinsics) */ + +#endif /* __PTRAUTH_H */ diff --git a/lib/include/riscv_vector.h b/lib/include/riscv_vector.h index 083a135877..c99ceb8021 100644 --- a/lib/include/riscv_vector.h +++ b/lib/include/riscv_vector.h @@ -14,10 +14,6 @@ #include <stdint.h> #include <stddef.h> -#ifndef __riscv_vector -#error "Vector intrinsics require the vector extension."
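The new ptrauth.h is easiest to read from the caller's side. A minimal sketch (not part of the diff; on targets without the ptrauth_intrinsics feature the fallback macros above turn these calls into no-ops; sign_node/auth_node are hypothetical helpers):

    #include <ptrauth.h>

    struct node { int value; };

    /* Sign a data pointer with a fixed key and a string-derived
       constant discriminator. */
    static struct node *sign_node(struct node *p) {
      return ptrauth_sign_unauthenticated(
          p, ptrauth_key_process_dependent_data,
          ptrauth_string_discriminator("struct node"));
    }

    /* Authenticate with the same key and discriminator; on a real
       ptrauth target a mismatch traps. */
    static struct node *auth_node(struct node *p) {
      return ptrauth_auth_data(p, ptrauth_key_process_dependent_data,
                               ptrauth_string_discriminator("struct node"));
    }

Per the header's own comments, a pointer that may validly be null should be checked for null before authenticating, since a signed value never looks like a null pointer.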
-#endif - #ifdef __cplusplus extern "C" { #endif diff --git a/lib/include/sifive_vector.h b/lib/include/sifive_vector.h index 42d7224db6..4e67ad6fca 100644 --- a/lib/include/sifive_vector.h +++ b/lib/include/sifive_vector.h @@ -13,4 +13,106 @@ #pragma clang riscv intrinsic sifive_vector +#define __riscv_sf_vc_x_se_u8mf4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 6, vl) +#define __riscv_sf_vc_x_se_u8mf2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 7, vl) +#define __riscv_sf_vc_x_se_u8m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 0, vl) +#define __riscv_sf_vc_x_se_u8m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 1, vl) +#define __riscv_sf_vc_x_se_u8m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 2, vl) +#define __riscv_sf_vc_x_se_u8m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 3, vl) + +#define __riscv_sf_vc_x_se_u16mf2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 7, vl) +#define __riscv_sf_vc_x_se_u16m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 0, vl) +#define __riscv_sf_vc_x_se_u16m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 1, vl) +#define __riscv_sf_vc_x_se_u16m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 2, vl) +#define __riscv_sf_vc_x_se_u16m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 3, vl) + +#define __riscv_sf_vc_x_se_u32m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 0, vl) +#define __riscv_sf_vc_x_se_u32m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 1, vl) +#define __riscv_sf_vc_x_se_u32m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 2, vl) +#define __riscv_sf_vc_x_se_u32m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 3, vl) + +#define __riscv_sf_vc_i_se_u8mf4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 7, vl) +#define __riscv_sf_vc_i_se_u8mf2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 6, vl) +#define __riscv_sf_vc_i_se_u8m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 0, vl) +#define __riscv_sf_vc_i_se_u8m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 1, vl) +#define __riscv_sf_vc_i_se_u8m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 2, vl) +#define __riscv_sf_vc_i_se_u8m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 3, vl) + +#define __riscv_sf_vc_i_se_u16mf2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 7, vl) +#define __riscv_sf_vc_i_se_u16m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 0, vl) +#define __riscv_sf_vc_i_se_u16m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 1, vl) +#define 
__riscv_sf_vc_i_se_u16m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 2, vl) +#define __riscv_sf_vc_i_se_u16m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 3, vl) + +#define __riscv_sf_vc_i_se_u32m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 0, vl) +#define __riscv_sf_vc_i_se_u32m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 1, vl) +#define __riscv_sf_vc_i_se_u32m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 2, vl) +#define __riscv_sf_vc_i_se_u32m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 3, vl) + +#if __riscv_v_elen >= 64 +#define __riscv_sf_vc_x_se_u8mf8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 5, vl) +#define __riscv_sf_vc_x_se_u16mf4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 6, vl) +#define __riscv_sf_vc_x_se_u32mf2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 7, vl) + +#define __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 5, vl) +#define __riscv_sf_vc_i_se_u16mf4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 6, vl) +#define __riscv_sf_vc_i_se_u32mf2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 7, vl) + +#define __riscv_sf_vc_i_se_u64m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 0, vl) +#define __riscv_sf_vc_i_se_u64m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 1, vl) +#define __riscv_sf_vc_i_se_u64m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 2, vl) +#define __riscv_sf_vc_i_se_u64m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 3, vl) + +#if __riscv_xlen >= 64 +#define __riscv_sf_vc_x_se_u64m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 0, vl) +#define __riscv_sf_vc_x_se_u64m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 1, vl) +#define __riscv_sf_vc_x_se_u64m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 2, vl) +#define __riscv_sf_vc_x_se_u64m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 3, vl) +#endif +#endif + #endif //_SIFIVE_VECTOR_H_ diff --git a/lib/include/smmintrin.h b/lib/include/smmintrin.h index 005d7db9c3..b3fec474e3 100644 --- a/lib/include/smmintrin.h +++ b/lib/include/smmintrin.h @@ -1188,6 +1188,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M, /// Compares each of the corresponding 64-bit values of the 128-bit /// integer vectors for equality. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VPCMPEQQ / PCMPEQQ instruction. @@ -1431,8 +1433,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) { } /* SSE4 Pack with Unsigned Saturation.
*/ -/// Converts 32-bit signed integers from both 128-bit integer vector -/// operands into 16-bit unsigned integers, and returns the packed result. +/// Converts, with saturation, 32-bit signed integers from both 128-bit integer +/// vector operands into 16-bit unsigned integers, and returns the packed +/// result. +/// /// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than /// 0x0000 are saturated to 0x0000. /// @@ -1441,17 +1445,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) { /// This intrinsic corresponds to the VPACKUSDW / PACKUSDW instruction. /// /// \param __V1 -/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a -/// signed integer and is converted to a 16-bit unsigned integer with -/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values -/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values -/// are written to the lower 64 bits of the result. +/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are +/// written to the lower 64 bits of the result. /// \param __V2 -/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a -/// signed integer and is converted to a 16-bit unsigned integer with -/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values -/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values -/// are written to the higher 64 bits of the result. +/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are +/// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [8 x i16] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1, __m128i __V2) { @@ -2305,6 +2303,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) { /// integer vectors to determine if the values in the first operand are /// greater than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VPCMPGTQ / PCMPGTQ instruction. diff --git a/lib/include/stdalign.h b/lib/include/stdalign.h index 158508e65d..56cdfa52d4 100644 --- a/lib/include/stdalign.h +++ b/lib/include/stdalign.h @@ -10,6 +10,10 @@ #ifndef __STDALIGN_H #define __STDALIGN_H +#if defined(__MVS__) && __has_include_next(<stdalign.h>) +#include_next <stdalign.h> +#else + #if defined(__cplusplus) || \ (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202311L) #ifndef __cplusplus @@ -21,4 +25,5 @@ #define __alignof_is_defined 1 #endif /* __STDC_VERSION__ */ +#endif /* __MVS__ */ #endif /* __STDALIGN_H */ diff --git a/lib/include/stdarg.h b/lib/include/stdarg.h index 94b066566f..6203d7a600 100644 --- a/lib/include/stdarg.h +++ b/lib/include/stdarg.h @@ -14,29 +14,24 @@ * need to use some of its interfaces. Otherwise this header provides all of * the expected interfaces. * - * When clang modules are enabled, this header is a textual header. It ignores - * its header guard so that multiple submodules can export its interfaces. - * Take module SM with submodules A and B, whose headers both include stdarg.h - * When SM.A builds, __STDARG_H will be defined. When SM.B builds, the - * definition from SM.A will leak when building without local submodule - * visibility. stdarg.h wouldn't include any of its implementation headers, and - * SM.B wouldn't import any of the stdarg modules, and SM.B's `export *` - * wouldn't export any stdarg interfaces as expected.
However, since stdarg.h - * ignores its header guard when building with modules, it all works as - * expected. - * - * When clang modules are not enabled, the header guards can function in the - * normal simple fashion. + * When clang modules are enabled, this header is a textual header to support + * the multiple include behavior. As such, it doesn't directly declare anything + * so that it doesn't add duplicate declarations to all of its includers' + * modules. */ -#if !defined(__STDARG_H) || __has_feature(modules) || \ - defined(__need___va_list) || defined(__need_va_list) || \ - defined(__need_va_arg) || defined(__need___va_copy) || \ - defined(__need_va_copy) +#if defined(__MVS__) && __has_include_next(<stdarg.h>) +#undef __need___va_list +#undef __need_va_list +#undef __need_va_arg +#undef __need___va_copy +#undef __need_va_copy +#include <__stdarg_header_macro.h> +#include_next <stdarg.h> +#else #if !defined(__need___va_list) && !defined(__need_va_list) && \ !defined(__need_va_arg) && !defined(__need___va_copy) && \ !defined(__need_va_copy) -#define __STDARG_H #define __need___va_list #define __need_va_list #define __need_va_arg @@ -49,6 +44,7 @@ !defined(__STRICT_ANSI__) #define __need_va_copy #endif +#include <__stdarg_header_macro.h> #endif #ifdef __need___va_list @@ -76,4 +72,4 @@ #undef __need_va_copy #endif /* defined(__need_va_copy) */ -#endif +#endif /* __MVS__ */ diff --git a/lib/include/stdatomic.h b/lib/include/stdatomic.h index 521c473dd1..1991351f9e 100644 --- a/lib/include/stdatomic.h +++ b/lib/include/stdatomic.h @@ -16,7 +16,7 @@ * Exclude the MSVC path as well as the MSVC header as of the 14.31.30818 * explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback * to the clang resource header until that is fully supported. The - * `stdatomic.h` header requires C++ 23 or newer. + * `stdatomic.h` header requires C++23 or newer. */ #if __STDC_HOSTED__ && \ __has_include_next(<stdatomic.h>) && \ @@ -35,6 +35,9 @@ extern "C" { #endif #define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE #define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L +#define ATOMIC_CHAR8_T_LOCK_FREE __CLANG_ATOMIC_CHAR8_T_LOCK_FREE +#endif #define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE @@ -104,6 +107,9 @@ typedef _Atomic(long) atomic_long; typedef _Atomic(unsigned long) atomic_ulong; typedef _Atomic(long long) atomic_llong; typedef _Atomic(unsigned long long) atomic_ullong; +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L +typedef _Atomic(unsigned char) atomic_char8_t; +#endif typedef _Atomic(uint_least16_t) atomic_char16_t; typedef _Atomic(uint_least32_t) atomic_char32_t; typedef _Atomic(wchar_t) atomic_wchar_t; @@ -166,7 +172,11 @@ typedef _Atomic(uintmax_t) atomic_uintmax_t; typedef struct atomic_flag { atomic_bool _Value; } atomic_flag; +#ifdef __cplusplus +#define ATOMIC_FLAG_INIT {false} +#else #define ATOMIC_FLAG_INIT { 0 } +#endif /* These should be provided by the libc implementation.
*/ #ifdef __cplusplus diff --git a/lib/include/stdbool.h b/lib/include/stdbool.h index 9406aab0ca..dfaad2b65a 100644 --- a/lib/include/stdbool.h +++ b/lib/include/stdbool.h @@ -12,6 +12,10 @@ #define __bool_true_false_are_defined 1 +#if defined(__MVS__) && __has_include_next(<stdbool.h>) +#include_next <stdbool.h> +#else + #if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L /* FIXME: We should be issuing a deprecation warning here, but cannot yet due * to system headers which include this header file unconditionally. @@ -31,4 +35,5 @@ #endif #endif +#endif /* __MVS__ */ #endif /* __STDBOOL_H */ diff --git a/lib/include/stddef.h b/lib/include/stddef.h index e0ad7b8d17..99b275aebf 100644 --- a/lib/include/stddef.h +++ b/lib/include/stddef.h @@ -14,34 +14,32 @@ * need to use some of its interfaces. Otherwise this header provides all of * the expected interfaces. * - * When clang modules are enabled, this header is a textual header. It ignores - * its header guard so that multiple submodules can export its interfaces. - * Take module SM with submodules A and B, whose headers both include stddef.h - * When SM.A builds, __STDDEF_H will be defined. When SM.B builds, the - * definition from SM.A will leak when building without local submodule - * visibility. stddef.h wouldn't include any of its implementation headers, and - * SM.B wouldn't import any of the stddef modules, and SM.B's `export *` - * wouldn't export any stddef interfaces as expected. However, since stddef.h - * ignores its header guard when building with modules, it all works as - * expected. - * - * When clang modules are not enabled, the header guards can function in the - * normal simple fashion. + * When clang modules are enabled, this header is a textual header to support + * the multiple include behavior. As such, it doesn't directly declare anything + * so that it doesn't add duplicate declarations to all of its includers' + * modules. */ -#if !defined(__STDDEF_H) || __has_feature(modules) || \ - (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1) || \ - defined(__need_ptrdiff_t) || defined(__need_size_t) || \ - defined(__need_rsize_t) || defined(__need_wchar_t) || \ - defined(__need_NULL) || defined(__need_nullptr_t) || \ - defined(__need_unreachable) || defined(__need_max_align_t) || \ - defined(__need_offsetof) || defined(__need_wint_t) +#if defined(__MVS__) && __has_include_next(<stddef.h>) +#undef __need_ptrdiff_t +#undef __need_size_t +#undef __need_rsize_t +#undef __need_wchar_t +#undef __need_NULL +#undef __need_nullptr_t +#undef __need_unreachable +#undef __need_max_align_t +#undef __need_offsetof +#undef __need_wint_t +#include <__stddef_header_macro.h> +#include_next <stddef.h> + +#else #if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \ !defined(__need_rsize_t) && !defined(__need_wchar_t) && \ !defined(__need_NULL) && !defined(__need_nullptr_t) && \ !defined(__need_unreachable) && !defined(__need_max_align_t) && \ !defined(__need_offsetof) && !defined(__need_wint_t) -#define __STDDEF_H #define __need_ptrdiff_t #define __need_size_t /* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is @@ -50,7 +48,24 @@ #define __need_rsize_t #endif #define __need_wchar_t +#if !defined(__STDDEF_H) || __has_feature(modules) +/* + * __stddef_null.h is special when building without modules: if __need_NULL is + * set, then it will unconditionally redefine NULL.
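The stdatomic.h hunk above adds C23's atomic_char8_t (guarded on __STDC_VERSION__) and gives ATOMIC_FLAG_INIT a C++-friendly initializer. A small usage sketch (not part of the diff; the guard mirrors the header's own):

    #include <stdatomic.h>

    static atomic_flag lock = ATOMIC_FLAG_INIT; /* { 0 } in C, {false} in C++ */

    #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
    static atomic_char8_t tag; /* _Atomic(unsigned char) per the new typedef */
    #endif

    void acquire(void) {
      while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
        ; /* spin until the flag was previously clear */
    }

    void release(void) {
      atomic_flag_clear_explicit(&lock, memory_order_release);
    }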
To avoid stepping on client + definitions of NULL, __need_NULL should only be set the first time this + header is included, that is when __STDDEF_H is not defined. However, when + building with modules, this header is a textual header and needs to + unconditionally include __stddef_null.h to support multiple submodules + exporting _Builtin_stddef.null. Take module SM with submodules A and B, whose + headers both include stddef.h When SM.A builds, __STDDEF_H will be defined. + When SM.B builds, the definition from SM.A will leak when building without + local submodule visibility. stddef.h wouldn't include __stddef_null.h, and + SM.B wouldn't import _Builtin_stddef.null, and SM.B's `export *` wouldn't + export NULL as expected. When building with modules, always include + __stddef_null.h so that everything works as expected. + */ #define __need_NULL +#endif #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ defined(__cplusplus) #define __need_nullptr_t @@ -66,6 +81,7 @@ /* wint_t is provided by <wchar.h> and not <stddef.h>. It's here * for compatibility, but must be explicitly requested. Therefore * __need_wint_t is intentionally not defined here. */ +#include <__stddef_header_macro.h> #endif #if defined(__need_ptrdiff_t) @@ -120,4 +136,4 @@ __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */ #undef __need_wint_t #endif /* __need_wint_t */ -#endif +#endif /* __MVS__ */ diff --git a/lib/include/stdint.h b/lib/include/stdint.h index b6699b6ca3..01feab7b1e 100644 --- a/lib/include/stdint.h +++ b/lib/include/stdint.h @@ -14,6 +14,10 @@ #define __CLANG_STDINT_H #endif +#if defined(__MVS__) && __has_include_next(<stdint.h>) +#include_next <stdint.h> +#else + /* If we're hosted, fall back to the system's stdint.h, which might have * additional definitions. */ @@ -947,4 +951,5 @@ typedef __UINTMAX_TYPE__ uintmax_t; #endif #endif /* __STDC_HOSTED__ */ +#endif /* __MVS__ */ #endif /* __CLANG_STDINT_H */ diff --git a/lib/include/stdnoreturn.h b/lib/include/stdnoreturn.h index c90bf77e84..6a9b209c72 100644 --- a/lib/include/stdnoreturn.h +++ b/lib/include/stdnoreturn.h @@ -10,9 +10,15 @@ #ifndef __STDNORETURN_H #define __STDNORETURN_H +#if defined(__MVS__) && __has_include_next(<stdnoreturn.h>) +#include_next <stdnoreturn.h> +#else + #define noreturn _Noreturn #define __noreturn_is_defined 1 +#endif /* __MVS__ */ + #if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS) /* The noreturn macro is deprecated in C23. We do not mark it as such because diff --git a/lib/include/tmmintrin.h b/lib/include/tmmintrin.h index 7d8dc46c57..bf8327b692 100644 --- a/lib/include/tmmintrin.h +++ b/lib/include/tmmintrin.h @@ -271,10 +271,11 @@ _mm_hadd_pi32(__m64 __a, __m64 __b) return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b); } -/// Horizontally adds the adjacent pairs of values contained in 2 packed -/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are -/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to -/// 0x8000. +/// Horizontally adds, with saturation, the adjacent pairs of values contained +/// in two packed 128-bit vectors of [8 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000.
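The same z/OS pattern recurs across inttypes.h, iso646.h, limits.h, stdalign.h, stdarg.h, stdbool.h, stddef.h, stdint.h, stdnoreturn.h, and varargs.h in this diff: when targeting __MVS__ and a system copy of the header exists further down the include path, clang's resource header now defers to it. Schematically (a sketch only, with foo.h standing in for any of these headers):

    #if defined(__MVS__) && __has_include_next(<foo.h>)
    /* Defer to the z/OS system header. */
    #include_next <foo.h>
    #else
    /* ... clang's own definitions, unchanged ... */
    #endif /* __MVS__ */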
/// /// \headerfile <x86intrin.h> /// @@ -296,10 +297,11 @@ _mm_hadds_epi16(__m128i __a, __m128i __b) return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b); } -/// Horizontally adds the adjacent pairs of values contained in 2 packed -/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are -/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to -/// 0x8000. +/// Horizontally adds, with saturation, the adjacent pairs of values contained +/// in two packed 64-bit vectors of [4 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. /// /// \headerfile <x86intrin.h> /// @@ -413,10 +415,11 @@ _mm_hsub_pi32(__m64 __a, __m64 __b) return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b); } -/// Horizontally subtracts the adjacent pairs of values contained in 2 -/// packed 128-bit vectors of [8 x i16]. Positive differences greater than -/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are -/// saturated to 0x8000. +/// Horizontally subtracts, with saturation, the adjacent pairs of values +/// contained in two packed 128-bit vectors of [8 x i16]. +/// +/// Positive differences greater than 0x7FFF are saturated to 0x7FFF. +/// Negative differences less than 0x8000 are saturated to 0x8000. /// /// \headerfile <x86intrin.h> /// @@ -438,10 +441,11 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b) return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b); } -/// Horizontally subtracts the adjacent pairs of values contained in 2 -/// packed 64-bit vectors of [4 x i16]. Positive differences greater than -/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are -/// saturated to 0x8000. +/// Horizontally subtracts, with saturation, the adjacent pairs of values +/// contained in two packed 64-bit vectors of [4 x i16]. +/// +/// Positive differences greater than 0x7FFF are saturated to 0x7FFF. +/// Negative differences less than 0x8000 are saturated to 0x8000.
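For the horizontal saturating forms, a compact sketch (illustrative only; SSSE3 assumed, demo_hadds is a hypothetical name):

    #include <tmmintrin.h>

    static __m128i demo_hadds(void) {
      __m128i a = _mm_set1_epi16(0x7000);  /* adjacent pairs sum to 0xE000, clamped to 0x7FFF */
      __m128i b = _mm_set1_epi16(-0x7000); /* pairs sum to -0xE000, clamped to 0x8000 (-32768) */
      /* Low four result lanes come from a's pairs, high four from b's. */
      return _mm_hadds_epi16(a, b);
    }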
diff --git a/lib/include/varargs.h b/lib/include/varargs.h index d241b7de3c..d33ddc5ae7 100644 --- a/lib/include/varargs.h +++ b/lib/include/varargs.h @@ -8,5 +8,9 @@ */ #ifndef __VARARGS_H #define __VARARGS_H - #error "Please use <stdarg.h> instead of <varargs.h>" +#if defined(__MVS__) && __has_include_next(<varargs.h>) +#include_next <varargs.h> +#else +#error "Please use <stdarg.h> instead of <varargs.h>" +#endif /* __MVS__ */ #endif diff --git a/lib/include/x86gprintrin.h b/lib/include/x86gprintrin.h index ed141879fb..3d5cc606d7 100644 --- a/lib/include/x86gprintrin.h +++ b/lib/include/x86gprintrin.h @@ -10,38 +10,31 @@ #ifndef __X86GPRINTRIN_H #define __X86GPRINTRIN_H -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__HRESET__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__HRESET__) #include <hresetintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__UINTR__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__UINTR__) #include <uintrintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__USERMSR__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__USERMSR__) #include <usermsrintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CRC32__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CRC32__) #include <crc32intrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PRFCHI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHI__) #include <prfchiintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RAOINT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RAOINT__) #include <raointintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CMPCCXADD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CMPCCXADD__) #include <cmpccxaddintrin.h> #endif
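With the _MSC_VER term gone, MSVC-compatible builds now pull in these sub-headers whenever __SCE__ does not opt out; user code still gates actual use on the feature macro each guard names. A hedged sketch of that usage pattern, picking __blsfill_u64 from the TBM set as the example (x86-64 only; the fallback is the documented BLSFILL identity):

#include <x86intrin.h>

unsigned long long fill_lowest_set(unsigned long long x) {
#if defined(__TBM__) && defined(__x86_64__)
  // Declared by the TBM sub-header only when the feature is enabled (e.g. -mtbm).
  return __blsfill_u64(x);
#else
  return x | (x - 1); // portable equivalent: fill downward from the lowest set bit
#endif
}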
diff --git a/lib/include/x86intrin.h b/lib/include/x86intrin.h index 450fd008da..f42e9e580f 100644 --- a/lib/include/x86intrin.h +++ b/lib/include/x86intrin.h @@ -14,53 +14,39 @@ #include <immintrin.h> -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__3dNOW__) -#include <mm3dnow.h> -#endif - -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PRFCHW__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHW__) #include <prfchwintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE4A__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE4A__) #include <ammintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FMA4__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA4__) #include <fma4intrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XOP__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XOP__) #include <xopintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__TBM__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__TBM__) #include <tbmintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__LWP__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__LWP__) #include <lwpintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MWAITX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MWAITX__) #include <mwaitxintrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLZERO__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLZERO__) #include <clzerointrin.h> #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDPRU__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPRU__) #include <rdpruintrin.h> #endif diff --git a/lib/include/xmmintrin.h b/lib/include/xmmintrin.h index 47368f3c23..6fb27297af 100644 --- a/lib/include/xmmintrin.h +++ b/lib/include/xmmintrin.h @@ -316,6 +316,8 @@ _mm_rsqrt_ps(__m128 __a) /// operands and returns the lesser value in the low-order bits of the /// vector of [4 x float]. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VMINSS / MINSS instructions. @@ -338,6 +340,8 @@ _mm_min_ss(__m128 __a, __m128 __b) /// Compares two 128-bit vectors of [4 x float] and returns the lesser /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VMINPS / MINPS instructions. @@ -358,6 +362,8 @@ _mm_min_ps(__m128 __a, __m128 __b) /// operands and returns the greater value in the low-order bits of a 128-bit /// vector of [4 x float]. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VMAXSS / MAXSS instructions. @@ -380,6 +386,8 @@ _mm_max_ss(__m128 __a, __m128 __b) /// Compares two 128-bit vectors of [4 x float] and returns the greater /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VMAXPS / MAXPS instructions.
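The new NaN notes make the MINPS/MAXPS asymmetry explicit: the hardware computes a < b ? a : b per lane, and a NaN makes the comparison false, so the second operand wins. A short illustration (not from the patch):

#include <xmmintrin.h>
#include <cmath>
#include <cstdio>

int main() {
  __m128 qnan = _mm_set1_ps(std::nanf(""));
  __m128 one  = _mm_set1_ps(1.0f);
  // Operand order matters once NaNs are involved:
  std::printf("%f\n", _mm_cvtss_f32(_mm_min_ps(qnan, one))); // 1.0 (from __b)
  std::printf("%f\n", _mm_cvtss_f32(_mm_min_ps(one, qnan))); // nan (from __b)
}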
@@ -474,8 +482,11 @@ _mm_xor_ps(__m128 __a, __m128 __b) } /// Compares two 32-bit float values in the low-order bits of both -/// operands for equality and returns the result of the comparison in the +/// operands for equality. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the /// low-order bits of a vector [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile <x86intrin.h> /// @@ -498,6 +509,9 @@ _mm_cmpeq_ss(__m128 __a, __m128 __b) /// Compares each of the corresponding 32-bit float values of the /// 128-bit vectors of [4 x float] for equality. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPEQPS / CMPEQPS instructions. @@ -515,8 +529,11 @@ _mm_cmpeq_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is less than the -/// corresponding value in the second operand and returns the result of the -/// comparison in the low-order bits of a vector of [4 x float]. +/// corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile <x86intrin.h> /// @@ -540,6 +557,9 @@ _mm_cmplt_ss(__m128 __a, __m128 __b) /// Compares each of the corresponding 32-bit float values of the /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are less than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. @@ -557,9 +577,11 @@ _mm_cmplt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is less than or -/// equal to the corresponding value in the second operand and returns the -/// result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// equal to the corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in +/// the low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile <x86intrin.h> /// @@ -583,6 +605,9 @@ _mm_cmple_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are less than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. @@ -600,8 +625,11 @@ _mm_cmple_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is greater than -/// the corresponding value in the second operand and returns the result of -/// the comparison in the low-order bits of a vector of [4 x float]. +/// the corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile <x86intrin.h> /// @@ -627,6 +655,9 @@ _mm_cmpgt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are greater than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. @@ -644,9 +675,11 @@ _mm_cmpgt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is greater than -/// or equal to the corresponding value in the second operand and returns -/// the result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile <x86intrin.h> /// @@ -672,6 +705,9 @@ _mm_cmpge_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are greater than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions.
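Because each result lane is an all-zeros/all-ones mask (and these ordered predicates treat NaN as false), the comparisons compose directly with the bitwise ops for branchless selection. A minimal sketch:

#include <xmmintrin.h>

// Zero out negative lanes; NaN lanes compare false and pass through unchanged.
static __m128 zero_negatives(__m128 v) {
  __m128 neg = _mm_cmplt_ps(v, _mm_setzero_ps()); // 0xFFFFFFFF where v < 0
  return _mm_andnot_ps(neg, v);                   // clear exactly those lanes
}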
@@ -687,9 +723,12 @@ _mm_cmpge_ps(__m128 __a, __m128 __b) return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a); } -/// Compares two 32-bit float values in the low-order bits of both -/// operands for inequality and returns the result of the comparison in the +/// Compares two 32-bit float values in the low-order bits of both operands +/// for inequality. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the /// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -713,6 +752,9 @@ _mm_cmpneq_ss(__m128 __a, __m128 __b) /// Compares each of the corresponding 32-bit float values of the /// 128-bit vectors of [4 x float] for inequality. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNEQPS / CMPNEQPS @@ -731,8 +773,11 @@ _mm_cmpneq_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not less than -/// the corresponding value in the second operand and returns the result of -/// the comparison in the low-order bits of a vector of [4 x float]. +/// the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -757,6 +802,9 @@ _mm_cmpnlt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not less than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS @@ -775,9 +823,11 @@ _mm_cmpnlt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not less than -/// or equal to the corresponding value in the second operand and returns -/// the result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -802,6 +852,9 @@ _mm_cmpnle_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not less than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS @@ -820,9 +873,11 @@ _mm_cmpnle_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not greater -/// than the corresponding value in the second operand and returns the -/// result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// than the corresponding value in the second operand. 
+/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile <x86intrin.h> /// @@ -849,6 +904,9 @@ _mm_cmpngt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not greater than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS @@ -867,9 +925,11 @@ _mm_cmpngt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not greater -/// than or equal to the corresponding value in the second operand and -/// returns the result of the comparison in the low-order bits of a vector -/// of [4 x float]. +/// than or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile <x86intrin.h> /// @@ -896,6 +956,9 @@ _mm_cmpnge_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not greater than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS @@ -914,9 +977,11 @@ _mm_cmpnge_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is ordered with -/// respect to the corresponding value in the second operand and returns the -/// result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// respect to the corresponding value in the second operand. +/// +/// A pair of floating-point values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFF for true. /// /// \headerfile <x86intrin.h> /// @@ -941,6 +1006,10 @@ _mm_cmpord_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are ordered with respect to those in the second operand. /// +/// A pair of floating-point values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFF for true. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPORDPS / CMPORDPS
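The ordered/unordered pair gives a cheap vector NaN test: comparing a register against itself is ordered in exactly the non-NaN lanes. A sketch of the usual blend built on it (function names here are illustrative):

#include <xmmintrin.h>

static __m128 replace_nans(__m128 v, __m128 fallback) {
  __m128 ok = _mm_cmpord_ps(v, v);                // all-ones where v is not NaN
  return _mm_or_ps(_mm_and_ps(ok, v),             // keep ordered lanes
                   _mm_andnot_ps(ok, fallback));  // substitute NaN lanes
}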
@@ -959,9 +1028,11 @@ _mm_cmpord_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is unordered -/// with respect to the corresponding value in the second operand and -/// returns the result of the comparison in the low-order bits of a vector -/// of [4 x float]. +/// with respect to the corresponding value in the second operand. +/// +/// A pair of single-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFF for true. /// /// \headerfile <x86intrin.h> /// @@ -986,6 +1057,10 @@ _mm_cmpunord_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are unordered with respect to those in the second operand. /// +/// A pair of single-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFF for true. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCMPUNORDPS / CMPUNORDPS @@ -1003,9 +1078,10 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b) } /// Compares two 32-bit float values in the low-order bits of both -/// operands for equality and returns the result of the comparison. +/// operands for equality. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile <x86intrin.h> /// @@ -1018,8 +1094,7 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the -/// two lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_ss(__m128 __a, __m128 __b) { @@ -1028,9 +1103,10 @@ _mm_comieq_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is less than the second -/// operand and returns the result of the comparison. +/// operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile <x86intrin.h> /// @@ -1043,8 +1119,7 @@ _mm_comieq_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_ss(__m128 __a, __m128 __b) { @@ -1053,9 +1128,10 @@ _mm_comilt_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is less than or equal to the -/// second operand and returns the result of the comparison. +/// second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile <x86intrin.h> /// @@ -1067,8 +1143,7 @@ _mm_comilt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_ss(__m128 __a, __m128 __b) { @@ -1077,9 +1152,10 @@ _mm_comile_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is greater than the second -/// operand and returns the result of the comparison. +/// operand.
/// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1091,8 +1167,7 @@ _mm_comile_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the -/// two lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_ss(__m128 __a, __m128 __b) { @@ -1101,9 +1176,10 @@ _mm_comigt_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is greater than or equal to -/// the second operand and returns the result of the comparison. +/// the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1115,8 +1191,7 @@ _mm_comigt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_ss(__m128 __a, __m128 __b) { @@ -1125,9 +1200,10 @@ _mm_comige_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is not equal to the second -/// operand and returns the result of the comparison. +/// operand. /// -/// If either of the two lower 32-bit values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile /// @@ -1139,8 +1215,7 @@ _mm_comige_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the -/// two lower 32-bit values is NaN, 1 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_ss(__m128 __a, __m128 __b) { @@ -1148,10 +1223,10 @@ _mm_comineq_ss(__m128 __a, __m128 __b) } /// Performs an unordered comparison of two 32-bit float values using -/// the low-order bits of both operands to determine equality and returns -/// the result of the comparison. +/// the low-order bits of both operands to determine equality. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1163,8 +1238,7 @@ _mm_comineq_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. 
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_ss(__m128 __a, __m128 __b) { @@ -1173,9 +1247,10 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// less than the second operand and returns the result of the comparison. +/// less than the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1187,8 +1262,7 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_ss(__m128 __a, __m128 __b) { @@ -1197,10 +1271,10 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// less than or equal to the second operand and returns the result of the -/// comparison. +/// less than or equal to the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1212,8 +1286,7 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_ss(__m128 __a, __m128 __b) { @@ -1222,10 +1295,10 @@ _mm_ucomile_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// greater than the second operand and returns the result of the -/// comparison. +/// greater than the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1237,8 +1310,7 @@ _mm_ucomile_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_ss(__m128 __a, __m128 __b) { @@ -1247,10 +1319,10 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// greater than or equal to the second operand and returns the result of -/// the comparison. +/// greater than or equal to the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. 
If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile <x86intrin.h> /// @@ -1262,8 +1334,7 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_ss(__m128 __a, __m128 __b) { @@ -1271,10 +1342,10 @@ _mm_ucomige_ss(__m128 __a, __m128 __b) } /// Performs an unordered comparison of two 32-bit float values using -/// the low-order bits of both operands to determine inequality and returns -/// the result of the comparison. +/// the low-order bits of both operands to determine inequality. /// -/// If either of the two lower 32-bit values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile <x86intrin.h> /// @@ -1286,8 +1357,7 @@ _mm_ucomige_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 1 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_ss(__m128 __a, __m128 __b) { @@ -1297,6 +1367,10 @@ _mm_ucomineq_ss(__m128 __a, __m128 __b) /// Converts a float value contained in the lower 32 bits of a vector of /// [4 x float] into a 32-bit integer. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI @@ -1315,6 +1389,10 @@ _mm_cvtss_si32(__m128 __a) /// Converts a float value contained in the lower 32 bits of a vector of /// [4 x float] into a 32-bit integer. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI @@ -1335,6 +1413,10 @@ _mm_cvt_ss2si(__m128 __a) /// Converts a float value contained in the lower 32 bits of a vector of /// [4 x float] into a 64-bit integer. /// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI
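The cvt/cvtt distinction documented in these hunks is rounding mode: CVTSS2SI honors the current MXCSR rounding (round-to-nearest-even by default), while CVTTSS2SI always truncates toward zero; out-of-range inputs yield the "integer indefinite" value when the invalid exception is masked. A quick demonstration (assumes the default MXCSR state):

#include <xmmintrin.h>
#include <cstdio>

int main() {
  __m128 v = _mm_set_ss(2.75f);
  std::printf("%d %d\n", _mm_cvtss_si32(v), _mm_cvttss_si32(v)); // 3 2
  __m128 big = _mm_set_ss(3e9f); // does not fit in a 32-bit integer
  std::printf("%d\n", _mm_cvtss_si32(big)); // -2147483648 (most negative integer)
}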
@@ -1355,6 +1437,10 @@ _mm_cvtss_si64(__m128 __a) /// Converts two low-order float values in a 128-bit vector of /// [4 x float] into a 64-bit vector of [2 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the CVTPS2PI instruction. @@ -1371,6 +1457,10 @@ _mm_cvtps_pi32(__m128 __a) /// Converts two low-order float values in a 128-bit vector of /// [4 x float] into a 64-bit vector of [2 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile <x86intrin.h> /// /// This intrinsic corresponds to the CVTPS2PI instruction. @@ -1384,9 +1474,12 @@ _mm_cvt_ps2pi(__m128 __a) return _mm_cvtps_pi32(__a); } -/// Converts a float value contained in the lower 32 bits of a vector of -/// [4 x float] into a 32-bit integer, truncating the result when it is -/// inexact. +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 32-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile <x86intrin.h> /// @@ -1403,9 +1496,12 @@ _mm_cvttss_si32(__m128 __a) return __builtin_ia32_cvttss2si((__v4sf)__a); } -/// Converts a float value contained in the lower 32 bits of a vector of -/// [4 x float] into a 32-bit integer, truncating the result when it is -/// inexact. +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 32-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile <x86intrin.h> /// @@ -1423,9 +1519,12 @@ _mm_cvtt_ss2si(__m128 __a) } #ifdef __x86_64__ -/// Converts a float value contained in the lower 32 bits of a vector of -/// [4 x float] into a 64-bit integer, truncating the result when it is -/// inexact. +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 64-bit integer. +/// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile <x86intrin.h> /// @@ -1443,9 +1542,13 @@ _mm_cvttss_si64(__m128 __a) } #endif -/// Converts two low-order float values in a 128-bit vector of -/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result -/// when it is inexact. +/// Converts the lower (first) two elements of a 128-bit vector of [4 x float] +/// into two signed truncated (rounded toward zero) 32-bit integers, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile <x86intrin.h> /// @@ -1461,9 +1564,13 @@ _mm_cvttps_pi32(__m128 __a) return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a); } -/// Converts two low-order float values in a 128-bit vector of [4 x -/// float] into a 64-bit vector of [2 x i32], truncating the result when it -/// is inexact. +/// Converts the lower (first) two elements of a 128-bit vector of [4 x float] +/// into two signed truncated (rounded toward zero) 32-bit integers, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer.
/// /// \headerfile /// @@ -1803,7 +1910,7 @@ _mm_undefined_ps(void) static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_set_ss(float __w) { - return __extension__ (__m128){ __w, 0, 0, 0 }; + return __extension__ (__m128){ __w, 0.0f, 0.0f, 0.0f }; } /// Constructs a 128-bit floating-point vector of [4 x float], with each @@ -2940,6 +3047,85 @@ _mm_movemask_ps(__m128 __a) return __builtin_ia32_movmskps((__v4sf)__a); } +/* Compare */ +#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ +#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ +#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ +#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */ +#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */ +#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */ +#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */ +#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */ + +/// Compares each of the corresponding values of two 128-bit vectors of +/// [4 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +#define _mm_cmp_ps(a, b, c) \ + ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c))) + +/// Compares each of the corresponding scalar values of two 128-bit +/// vectors of [4 x float], using the operation specified by the immediate +/// integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPSS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +#define _mm_cmp_ss(a, b, c) \ + ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c))) #define _MM_ALIGN16 __attribute__((aligned(16)))
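The predicate names encode the NaN behavior directly: ordered predicates are false on NaN, unordered ones true. In particular, _CMP_UNORD_Q against the same register is the idiomatic vector NaN mask; a brief sketch using the macro added above:

#include <xmmintrin.h>

// All-ones in exactly the lanes of v that hold a NaN.
static __m128 nan_mask(__m128 v) {
  return _mm_cmp_ps(v, v, _CMP_UNORD_Q);
}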
diff --git a/lib/include/yvals_core.h b/lib/include/yvals_core.h new file mode 100644 index 0000000000..5ee194a3e5 --- /dev/null +++ b/lib/include/yvals_core.h @@ -0,0 +1,25 @@ +//===----- yvals_core.h - Internal MSVC STL core header -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// Only include this if we are aiming for MSVC compatibility. +#ifndef _MSC_VER +#include_next <yvals_core.h> +#else + +#ifndef __clang_yvals_core_h +#define __clang_yvals_core_h + +#include_next <yvals_core.h> + +#ifdef _STL_INTRIN_HEADER +#undef _STL_INTRIN_HEADER +#define _STL_INTRIN_HEADER <intrin0.h> +#endif + +#endif +#endif diff --git a/lib/include/zos_wrappers/builtins.h b/lib/include/zos_wrappers/builtins.h new file mode 100644 index 0000000000..1f0d0e27ec --- /dev/null +++ b/lib/include/zos_wrappers/builtins.h @@ -0,0 +1,18 @@ +/*===---- builtins.h - z/Architecture Builtin Functions --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ZOS_WRAPPERS_BUILTINS_H +#define __ZOS_WRAPPERS_BUILTINS_H +#if defined(__MVS__) +#include_next <builtins.h> +#if defined(__VEC__) +#include <vecintrin.h> +#endif +#endif /* defined(__MVS__) */ +#endif /* __ZOS_WRAPPERS_BUILTINS_H */ diff --git a/lib/libcxx/include/__algorithm/adjacent_find.h b/lib/libcxx/include/__algorithm/adjacent_find.h index 7819e2cf49..6f15456e3a 100644 --- a/lib/libcxx/include/__algorithm/adjacent_find.h +++ b/lib/libcxx/include/__algorithm/adjacent_find.h @@ -26,7 +26,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template <class _Iter, class _Sent, class _BinaryPredicate> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter __adjacent_find(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { if (__first == __last) return __first; @@ -40,13 +40,13 @@ __adjacent_find(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { } template <class _ForwardIterator, class _BinaryPredicate> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator adjacent_find(_ForwardIterator __first, _ForwardIterator __last, _BinaryPredicate __pred) { return std::__adjacent_find(std::move(__first), std::move(__last), __pred); } template <class _ForwardIterator> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator adjacent_find(_ForwardIterator __first, _ForwardIterator __last) { return std::adjacent_find(std::move(__first), std::move(__last), __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/all_of.h b/lib/libcxx/include/__algorithm/all_of.h index 237f8495c6..ec84eea759 100644 --- a/lib/libcxx/include/__algorithm/all_of.h +++ b/lib/libcxx/include/__algorithm/all_of.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <class _InputIterator, class _Predicate> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool all_of(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (!__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/any_of.h b/lib/libcxx/include/__algorithm/any_of.h index 8ba7aae2b2..b5ff778c41 100644 --- a/lib/libcxx/include/__algorithm/any_of.h +++ b/lib/libcxx/include/__algorithm/any_of.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <class _InputIterator, class _Predicate> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool any_of(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/binary_search.h b/lib/libcxx/include/__algorithm/binary_search.h index 7a77d7b544..6065fc3727 100644 --- a/lib/libcxx/include/__algorithm/binary_search.h +++ b/lib/libcxx/include/__algorithm/binary_search.h @@ -22,14 +22,14 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <class _ForwardIterator, class _Tp, class _Compare> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool binary_search(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { __first =
std::lower_bound<_ForwardIterator, _Tp, __comp_ref_type<_Compare> >(__first, __last, __value, __comp); return __first != __last && !__comp(__value, *__first); } template <class _ForwardIterator, class _Tp> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool binary_search(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::binary_search(__first, __last, __value, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/clamp.h b/lib/libcxx/include/__algorithm/clamp.h index 003bf70dd4..1a5a3d0744 100644 --- a/lib/libcxx/include/__algorithm/clamp.h +++ b/lib/libcxx/include/__algorithm/clamp.h @@ -21,7 +21,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template <class _Tp, class _Compare> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& +[[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& clamp(_LIBCPP_LIFETIMEBOUND const _Tp& __v, _LIBCPP_LIFETIMEBOUND const _Tp& __lo, _LIBCPP_LIFETIMEBOUND const _Tp& __hi, @@ -31,7 +31,7 @@ clamp(_LIBCPP_LIFETIMEBOUND const _Tp& __v, } template <class _Tp> -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& +[[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& clamp(_LIBCPP_LIFETIMEBOUND const _Tp& __v, _LIBCPP_LIFETIMEBOUND const _Tp& __lo, _LIBCPP_LIFETIMEBOUND const _Tp& __hi) { diff --git a/lib/libcxx/include/__algorithm/comp.h b/lib/libcxx/include/__algorithm/comp.h index 3902f75603..a0fa88d6d2 100644 --- a/lib/libcxx/include/__algorithm/comp.h +++ b/lib/libcxx/include/__algorithm/comp.h @@ -10,8 +10,7 @@ #define _LIBCPP___ALGORITHM_COMP_H #include <__config> -#include <__type_traits/integral_constant.h> -#include <__type_traits/operation_traits.h> +#include <__type_traits/desugars_to.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -27,7 +26,7 @@ struct __equal_to { }; template <class _Tp, class _Up> -struct __desugars_to<__equal_tag, __equal_to, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, __equal_to, _Tp, _Up> = true; // The definition is required because __less is part of the ABI, but it's empty // because all comparisons should be transparent.
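The replaced trait shows the mechanism's shape: __desugars_to_v is a variable template defaulting to false, and a specialization marks predicate types known to mean plain == (or <), letting algorithms such as std::equal drop to memcmp-style fast paths. A simplified, standalone sketch of the pattern (reserved-style names kept only to mirror the diff; requires C++17 inline variables):

// Primary template: by default, a predicate is opaque.
template <class _Tag, class _Pred, class _Lhs, class _Rhs>
inline const bool __desugars_to_v = false;

struct __equal_tag {};

struct my_equal { // user functor known to mean operator==
  template <class _Tp>
  bool operator()(const _Tp& __a, const _Tp& __b) const { return __a == __b; }
};

// Opt-in specialization, as comp.h now does for __equal_to.
template <class _Tp, class _Up>
inline const bool __desugars_to_v<__equal_tag, my_equal, _Tp, _Up> = true;

static_assert(__desugars_to_v<__equal_tag, my_equal, int, int>, "detected");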
@@ -42,6 +41,9 @@ struct __less { } }; +template +inline const bool __desugars_to_v<__less_tag, __less<>, _Tp, _Tp> = true; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___ALGORITHM_COMP_H diff --git a/lib/libcxx/include/__algorithm/comp_ref_type.h b/lib/libcxx/include/__algorithm/comp_ref_type.h index aa9350c38c..c367fbb91a 100644 --- a/lib/libcxx/include/__algorithm/comp_ref_type.h +++ b/lib/libcxx/include/__algorithm/comp_ref_type.h @@ -41,9 +41,9 @@ struct __debug_less { } template - _LIBCPP_CONSTEXPR_SINCE_CXX14 inline _LIBCPP_HIDE_FROM_ABI decltype((void)std::declval<_Compare&>()( - std::declval<_LHS&>(), std::declval<_RHS&>())) - __do_compare_assert(int, _LHS& __l, _RHS& __r) { + _LIBCPP_CONSTEXPR_SINCE_CXX14 inline + _LIBCPP_HIDE_FROM_ABI decltype((void)std::declval<_Compare&>()(std::declval<_LHS&>(), std::declval<_RHS&>())) + __do_compare_assert(int, _LHS& __l, _RHS& __r) { _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(!__comp_(__l, __r), "Comparator does not induce a strict weak ordering"); (void)__l; (void)__r; diff --git a/lib/libcxx/include/__algorithm/copy.h b/lib/libcxx/include/__algorithm/copy.h index 4c3815405a..0890b895f5 100644 --- a/lib/libcxx/include/__algorithm/copy.h +++ b/lib/libcxx/include/__algorithm/copy.h @@ -32,7 +32,7 @@ template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> __copy(_InIter, _Sent, _OutIter); template -struct __copy_loop { +struct __copy_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -94,9 +94,7 @@ struct __copy_loop { __local_first = _Traits::__begin(++__segment_iterator); } } -}; -struct __copy_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -108,7 +106,7 @@ struct __copy_trivial { template pair<_InIter, _OutIter> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __copy(_InIter __first, _Sent __last, _OutIter __result) { - return std::__dispatch_copy_or_move<_AlgPolicy, __copy_loop<_AlgPolicy>, __copy_trivial>( + return std::__copy_move_unwrap_iters<__copy_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/copy_backward.h b/lib/libcxx/include/__algorithm/copy_backward.h index 3ec88d8bd5..73dc846a97 100644 --- a/lib/libcxx/include/__algorithm/copy_backward.h +++ b/lib/libcxx/include/__algorithm/copy_backward.h @@ -15,7 +15,7 @@ #include <__config> #include <__iterator/segmented_iterator.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -33,7 +33,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InIter, _OutIter> __copy_backward(_InIter __first, _Sent __last, _OutIter __result); template -struct __copy_backward_loop { +struct __copy_backward_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -104,9 +104,7 @@ struct __copy_backward_loop { __local_last = _Traits::__end(__segment_iterator); } } -}; -struct __copy_backward_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. 
template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -118,7 +116,7 @@ struct __copy_backward_trivial { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_BidirectionalIterator1, _BidirectionalIterator2> __copy_backward(_BidirectionalIterator1 __first, _Sentinel __last, _BidirectionalIterator2 __result) { - return std::__dispatch_copy_or_move<_AlgPolicy, __copy_backward_loop<_AlgPolicy>, __copy_backward_trivial>( + return std::__copy_move_unwrap_iters<__copy_backward_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/copy_move_common.h b/lib/libcxx/include/__algorithm/copy_move_common.h index 0fc7a5e3ce..8a98451a8f 100644 --- a/lib/libcxx/include/__algorithm/copy_move_common.h +++ b/lib/libcxx/include/__algorithm/copy_move_common.h @@ -19,9 +19,8 @@ #include <__type_traits/enable_if.h> #include <__type_traits/is_always_bitcastable.h> #include <__type_traits/is_constant_evaluated.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_trivially_assignable.h> -#include <__type_traits/is_trivially_copyable.h> #include <__type_traits/is_volatile.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -81,30 +80,17 @@ __copy_backward_trivial_impl(_In* __first, _In* __last, _Out* __result) { // Iterator unwrapping and dispatching to the correct overload. -template -struct __overload : _F1, _F2 { - using _F1::operator(); - using _F2::operator(); -}; - -template -struct __can_rewrap : false_type {}; - -template -struct __can_rewrap<_InIter, - _Sent, - _OutIter, - // Note that sentinels are always copy-constructible. - __enable_if_t< is_copy_constructible<_InIter>::value && is_copy_constructible<_OutIter>::value > > - : true_type {}; +template +struct __can_rewrap + : integral_constant::value && is_copy_constructible<_OutIter>::value> {}; template ::value, int> = 0> + __enable_if_t<__can_rewrap<_InIter, _OutIter>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter> -__unwrap_and_dispatch(_InIter __first, _Sent __last, _OutIter __out_first) { +__copy_move_unwrap_iters(_InIter __first, _Sent __last, _OutIter __out_first) { auto __range = std::__unwrap_range(__first, std::move(__last)); auto __result = _Algorithm()(std::move(__range.first), std::move(__range.second), std::__unwrap_iter(__out_first)); return std::make_pair(std::__rewrap_range<_Sent>(std::move(__first), std::move(__result.first)), @@ -115,24 +101,12 @@ template ::value, int> = 0> + __enable_if_t::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter> -__unwrap_and_dispatch(_InIter __first, _Sent __last, _OutIter __out_first) { +__copy_move_unwrap_iters(_InIter __first, _Sent __last, _OutIter __out_first) { return _Algorithm()(std::move(__first), std::move(__last), std::move(__out_first)); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter> -__dispatch_copy_or_move(_InIter __first, _Sent __last, _OutIter __out_first) { - using _Algorithm = __overload<_NaiveAlgorithm, _OptimizedAlgorithm>; - return std::__unwrap_and_dispatch<_Algorithm>(std::move(__first), std::move(__last), std::move(__out_first)); -} - _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS diff --git a/lib/libcxx/include/__algorithm/count.h b/lib/libcxx/include/__algorithm/count.h index 23a7d3c4dc..1cfe7f631a 100644 --- 
a/lib/libcxx/include/__algorithm/count.h +++ b/lib/libcxx/include/__algorithm/count.h @@ -79,7 +79,7 @@ __count(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __l } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __iter_diff_t<_InputIterator> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __iter_diff_t<_InputIterator> count(_InputIterator __first, _InputIterator __last, const _Tp& __value) { __identity __proj; return std::__count<_ClassicAlgPolicy>(__first, __last, __value, __proj); diff --git a/lib/libcxx/include/__algorithm/count_if.h b/lib/libcxx/include/__algorithm/count_if.h index 04f52b894f..25782069d0 100644 --- a/lib/libcxx/include/__algorithm/count_if.h +++ b/lib/libcxx/include/__algorithm/count_if.h @@ -20,9 +20,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename iterator_traits<_InputIterator>::difference_type - count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred) { +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 +typename iterator_traits<_InputIterator>::difference_type +count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred) { typename iterator_traits<_InputIterator>::difference_type __r(0); for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/equal.h b/lib/libcxx/include/__algorithm/equal.h index 3c0e3060e3..bfc8f72f6e 100644 --- a/lib/libcxx/include/__algorithm/equal.h +++ b/lib/libcxx/include/__algorithm/equal.h @@ -18,12 +18,11 @@ #include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include <__string/constexpr_c_functions.h> +#include <__type_traits/desugars_to.h> #include <__type_traits/enable_if.h> -#include <__type_traits/integral_constant.h> #include <__type_traits/is_constant_evaluated.h> #include <__type_traits/is_equality_comparable.h> #include <__type_traits/is_volatile.h> -#include <__type_traits/operation_traits.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -47,7 +46,7 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 boo template ::value && !is_volatile<_Tp>::value && + __enable_if_t<__desugars_to_v<__equal_tag, _BinaryPredicate, _Tp, _Up> && !is_volatile<_Tp>::value && !is_volatile<_Up>::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value, int> = 0> _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool @@ -56,33 +55,19 @@ __equal_iter_impl(_Tp* __first1, _Tp* __last1, _Up* __first2, _BinaryPredicate&) } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate __pred) { return std::__equal_iter_impl( std::__unwrap_iter(__first1), std::__unwrap_iter(__last1), std::__unwrap_iter(__first2), __pred); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2) { return std::equal(__first1, __last1, __first2, __equal_to()); } #if _LIBCPP_STD_VER >= 14 -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool 
-__equal(_InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - _BinaryPredicate __pred, - input_iterator_tag, - input_iterator_tag) { - for (; __first1 != __last1 && __first2 != __last2; ++__first1, (void)++__first2) - if (!__pred(*__first1, *__first2)) - return false; - return __first1 == __last1 && __first2 == __last2; -} template _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __equal_impl( @@ -101,7 +86,7 @@ template ::value && __is_identity<_Proj1>::value && + __enable_if_t<__desugars_to_v<__equal_tag, _Pred, _Tp, _Up> && __is_identity<_Proj1>::value && __is_identity<_Proj2>::value && !is_volatile<_Tp>::value && !is_volatile<_Up>::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value, int> = 0> @@ -110,17 +95,18 @@ __equal_impl(_Tp* __first1, _Tp* __last1, _Up* __first2, _Up*, _Pred&, _Proj1&, return std::__constexpr_memcmp_equal(__first1, __first2, __element_count(__last1 - __first1)); } -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool -__equal(_RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _BinaryPredicate __pred, - random_access_iterator_tag, - random_access_iterator_tag) { - if (std::distance(__first1, __last1) != std::distance(__first2, __last2)) - return false; +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +equal(_InputIterator1 __first1, + _InputIterator1 __last1, + _InputIterator2 __first2, + _InputIterator2 __last2, + _BinaryPredicate __pred) { + if constexpr (__has_random_access_iterator_category<_InputIterator1>::value && + __has_random_access_iterator_category<_InputIterator2>::value) { + if (std::distance(__first1, __last1) != std::distance(__first2, __last2)) + return false; + } __identity __proj; return std::__equal_impl( std::__unwrap_iter(__first1), @@ -132,36 +118,13 @@ __equal(_RandomAccessIterator1 __first1, __proj); } -template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool -equal(_InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - _BinaryPredicate __pred) { - return std::__equal<_BinaryPredicate&>( - __first1, - __last1, - __first2, - __last2, - __pred, - typename iterator_traits<_InputIterator1>::iterator_category(), - typename iterator_traits<_InputIterator2>::iterator_category()); +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { + return std::equal(__first1, __last1, __first2, __last2, __equal_to()); } -template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool -equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { - return std::__equal( - __first1, - __last1, - __first2, - __last2, - __equal_to(), - typename iterator_traits<_InputIterator1>::iterator_category(), - typename iterator_traits<_InputIterator2>::iterator_category()); -} -#endif +#endif // _LIBCPP_STD_VER >= 14 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__algorithm/equal_range.h b/lib/libcxx/include/__algorithm/equal_range.h index a942904319..09bbf8f006 100644 --- a/lib/libcxx/include/__algorithm/equal_range.h +++ b/lib/libcxx/include/__algorithm/equal_range.h @@ -23,7 +23,7 @@ 
#include <__iterator/iterator_traits.h> #include <__iterator/next.h> #include <__type_traits/is_callable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -60,7 +60,7 @@ __equal_range(_Iter __first, _Sent __last, const _Tp& __value, _Compare&& __comp } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> equal_range(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { static_assert(__is_callable<_Compare, decltype(*__first), const _Tp&>::value, "The comparator has to be callable"); static_assert(is_copy_constructible<_ForwardIterator>::value, "Iterator has to be copy constructible"); @@ -73,7 +73,7 @@ equal_range(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __valu } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> equal_range(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::equal_range(std::move(__first), std::move(__last), __value, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/fill_n.h b/lib/libcxx/include/__algorithm/fill_n.h index 36f3349d9e..f29633f880 100644 --- a/lib/libcxx/include/__algorithm/fill_n.h +++ b/lib/libcxx/include/__algorithm/fill_n.h @@ -9,18 +9,74 @@ #ifndef _LIBCPP___ALGORITHM_FILL_N_H #define _LIBCPP___ALGORITHM_FILL_N_H +#include <__algorithm/min.h> #include <__config> +#include <__fwd/bit_reference.h> #include <__iterator/iterator_traits.h> +#include <__memory/pointer_traits.h> #include <__utility/convert_to_integral.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + _LIBCPP_BEGIN_NAMESPACE_STD // fill_n isn't specialized for std::memset, because the compiler already optimizes the loop to a call to std::memset. +template +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator +__fill_n(_OutputIterator __first, _Size __n, const _Tp& __value); + +template +_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void +__fill_n_bool(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) { + using _It = __bit_iterator<_Cp, false>; + using __storage_type = typename _It::__storage_type; + + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = std::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + if (_FillVal) + *__first.__seg_ |= __m; + else + *__first.__seg_ &= ~__m; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + __storage_type __nw = __n / __bits_per_word; + std::__fill_n(std::__to_address(__first.__seg_), __nw, _FillVal ? 
static_cast<__storage_type>(-1) : 0); + __n -= __nw * __bits_per_word; + // do last partial word + if (__n > 0) { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + if (_FillVal) + *__first.__seg_ |= __m; + else + *__first.__seg_ &= ~__m; + } +} + +template +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_iterator<_Cp, false> +__fill_n(__bit_iterator<_Cp, false> __first, _Size __n, const bool& __value) { + if (__n > 0) { + if (__value) + std::__fill_n_bool(__first, __n); + else + std::__fill_n_bool(__first, __n); + } + return __first + __n; +} + template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator __fill_n(_OutputIterator __first, _Size __n, const _Tp& __value) { @@ -37,4 +93,6 @@ fill_n(_OutputIterator __first, _Size __n, const _Tp& __value) { _LIBCPP_END_NAMESPACE_STD +_LIBCPP_POP_MACROS + #endif // _LIBCPP___ALGORITHM_FILL_N_H diff --git a/lib/libcxx/include/__algorithm/find.h b/lib/libcxx/include/__algorithm/find.h index 7d7631b6e9..7f58dbb13a 100644 --- a/lib/libcxx/include/__algorithm/find.h +++ b/lib/libcxx/include/__algorithm/find.h @@ -43,7 +43,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // generic implementation template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter -__find_impl(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { +__find(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { for (; __first != __last; ++__first) if (std::__invoke(__proj, *__first) == __value) break; @@ -57,8 +57,7 @@ template ::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value && sizeof(_Tp) == 1, int> = 0> -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* -__find_impl(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { if (auto __ret = std::__constexpr_memchr(__first, __value, __last - __first)) return __ret; return __last; @@ -71,8 +70,7 @@ template ::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value && sizeof(_Tp) == sizeof(wchar_t) && _LIBCPP_ALIGNOF(_Tp) >= _LIBCPP_ALIGNOF(wchar_t), int> = 0> -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* -__find_impl(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { if (auto __ret = std::__constexpr_wmemchr(__first, __value, __last - __first)) return __ret; return __last; @@ -89,10 +87,10 @@ template ::value == is_signed<_Up>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* -__find_impl(_Tp* __first, _Tp* __last, const _Up& __value, _Proj& __proj) { +__find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj& __proj) { if (__value < numeric_limits<_Tp>::min() || __value > numeric_limits<_Tp>::max()) return __last; - return std::__find_impl(__first, __last, _Tp(__value), __proj); + return std::__find(__first, __last, _Tp(__value), __proj); } // __bit_iterator implementation @@ -134,7 +132,7 @@ __find_bool(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) template ::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_iterator<_Cp, _IsConst> -__find_impl(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value, _Proj&) { +__find(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value, 
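
`__fill_n_bool` above fills a leading partial word, then whole words, then a trailing partial word, using shift-built masks. A freestanding sketch of the same masking arithmetic on 64-bit words (all names invented for the example):

#include <algorithm>
#include <cstdint>

// Sketch: set or clear n bits starting at bit offset ctz within a word
// array, mirroring the partial/whole/partial word structure above.
inline void fill_bits_sketch(std::uint64_t* word, unsigned ctz, std::uint64_t n, bool value) {
  const unsigned bits = 64;
  if (ctz != 0) {  // leading partial word
    std::uint64_t clz = bits - ctz;
    std::uint64_t dn  = std::min<std::uint64_t>(clz, n);
    std::uint64_t m   = (~std::uint64_t(0) << ctz) & (~std::uint64_t(0) >> (clz - dn));
    if (value)
      *word |= m;
    else
      *word &= ~m;
    n -= dn;
    ++word;
  }
  for (; n >= bits; n -= bits)  // whole middle words
    *word++ = value ? ~std::uint64_t(0) : 0;
  if (n > 0) {  // trailing partial word
    std::uint64_t m = ~std::uint64_t(0) >> (bits - n);
    if (value)
      *word |= m;
    else
      *word &= ~m;
  }
}
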
_Proj&) { if (static_cast(__value)) return std::__find_bool(__first, static_cast(__last - __first)); return std::__find_bool(__first, static_cast(__last - __first)); @@ -150,7 +148,7 @@ template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _SegmentedIterator -__find_impl(_SegmentedIterator __first, _SegmentedIterator __last, const _Tp& __value, _Proj& __proj) { +__find(_SegmentedIterator __first, _SegmentedIterator __last, const _Tp& __value, _Proj& __proj) { return std::__find_segment_if(std::move(__first), std::move(__last), __find_segment<_Tp>(__value), __proj); } @@ -163,17 +161,17 @@ struct __find_segment { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _InputIterator operator()(_InputIterator __first, _InputIterator __last, _Proj& __proj) const { - return std::__find_impl(__first, __last, __value_, __proj); + return std::__find(__first, __last, __value_, __proj); } }; // public API template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator find(_InputIterator __first, _InputIterator __last, const _Tp& __value) { __identity __proj; return std::__rewrap_iter( - __first, std::__find_impl(std::__unwrap_iter(__first), std::__unwrap_iter(__last), __value, __proj)); + __first, std::__find(std::__unwrap_iter(__first), std::__unwrap_iter(__last), __value, __proj)); } _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__algorithm/find_end.h b/lib/libcxx/include/__algorithm/find_end.h index 4c26891666..7e08e79535 100644 --- a/lib/libcxx/include/__algorithm/find_end.h +++ b/lib/libcxx/include/__algorithm/find_end.h @@ -205,7 +205,7 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Fo } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_end( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_end( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, @@ -215,7 +215,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::find_end(__first1, __last1, __first2, __last2, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/find_first_of.h b/lib/libcxx/include/__algorithm/find_first_of.h index 14271cccc4..6b99f562f8 100644 --- a/lib/libcxx/include/__algorithm/find_first_of.h +++ b/lib/libcxx/include/__algorithm/find_first_of.h @@ -35,7 +35,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator1 __find_fir } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, @@ -45,7 +45,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( +_LIBCPP_NODISCARD inline 
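
For byte-sized, trivially equality-comparable element types, the renamed `__find` overloads above defer to `__constexpr_memchr` (and `__constexpr_wmemchr` for wchar_t-sized types). A simplified sketch of that dispatch, assuming plain runtime `memchr`:

#include <cstring>
#include <type_traits>

// Sketch of the byte fast path above; assumes last >= first and that the
// value is representable in T (the real code also guards promotions).
template <class T>
T* find_sketch(T* first, T* last, T value) {
  if constexpr (sizeof(T) == 1 && std::is_integral_v<T>) {
    if (void* p = std::memchr(first, static_cast<unsigned char>(value),
                              static_cast<std::size_t>(last - first)))
      return static_cast<T*>(p);
    return last;
  } else {
    for (; first != last; ++first)
      if (*first == value)
        return first;
    return last;
  }
}
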
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::__find_first_of_ce(__first1, __last1, __first2, __last2, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/find_if.h b/lib/libcxx/include/__algorithm/find_if.h index 09a39f6463..22092d352b 100644 --- a/lib/libcxx/include/__algorithm/find_if.h +++ b/lib/libcxx/include/__algorithm/find_if.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator find_if(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/find_if_not.h b/lib/libcxx/include/__algorithm/find_if_not.h index bf29ebb7cd..cc2001967f 100644 --- a/lib/libcxx/include/__algorithm/find_if_not.h +++ b/lib/libcxx/include/__algorithm/find_if_not.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator find_if_not(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (!__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/fold.h b/lib/libcxx/include/__algorithm/fold.h index 1a9d76b50d..255658f523 100644 --- a/lib/libcxx/include/__algorithm/fold.h +++ b/lib/libcxx/include/__algorithm/fold.h @@ -78,8 +78,7 @@ concept __indirectly_binary_left_foldable = struct __fold_left_with_iter { template _Sp, class _Tp, __indirectly_binary_left_foldable<_Tp, _Ip> _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto - operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { using _Up = decay_t>>; if (__first == __last) { @@ -95,7 +94,7 @@ struct __fold_left_with_iter { } template > _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { auto __result = operator()(ranges::begin(__r), ranges::end(__r), std::move(__init), std::ref(__f)); using _Up = decay_t>>; @@ -107,13 +106,12 @@ inline constexpr auto fold_left_with_iter = __fold_left_with_iter(); struct __fold_left { template _Sp, class _Tp, __indirectly_binary_left_foldable<_Tp, _Ip> _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto - operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { return fold_left_with_iter(std::move(__first), std::move(__last), std::move(__init), std::ref(__f)).value; } template > _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { return fold_left_with_iter(ranges::begin(__r), ranges::end(__r), std::move(__init), std::ref(__f)).value; } }; diff --git a/lib/libcxx/include/__algorithm/includes.h 
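
`fold_left` and `fold_left_with_iter` above now mark their static call operators `[[nodiscard]]`. Usage from C++23 user code looks like this (`fold_left_with_iter` also reports the iterator it stopped at):

#include <algorithm>   // std::ranges::fold_left (C++23)
#include <functional>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3, 4};
  int sum = std::ranges::fold_left(v, 0, std::plus<>());
  // Result members are `in` (the end iterator reached) and `value`.
  auto [it, total] = std::ranges::fold_left_with_iter(v.begin(), v.end(), 0, std::plus<>());
  return (sum == 10 && total == 10 && it == v.end()) ? 0 : 1;
}
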
b/lib/libcxx/include/__algorithm/includes.h index 05d45365eb..62af03c374 100644 --- a/lib/libcxx/include/__algorithm/includes.h +++ b/lib/libcxx/include/__algorithm/includes.h @@ -47,7 +47,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __includes( } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool includes(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, @@ -67,7 +67,7 @@ includes(_InputIterator1 __first1, } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool includes(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::includes(std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), __less<>()); } diff --git a/lib/libcxx/include/__algorithm/inplace_merge.h b/lib/libcxx/include/__algorithm/inplace_merge.h index eb3c0bdbc2..a6bcc66a2f 100644 --- a/lib/libcxx/include/__algorithm/inplace_merge.h +++ b/lib/libcxx/include/__algorithm/inplace_merge.h @@ -114,8 +114,8 @@ _LIBCPP_HIDE_FROM_ABI void __buffered_inplace_merge( for (_BidirectionalIterator __i = __middle; __i != __last; __d.template __incr(), (void)++__i, (void)++__p) ::new ((void*)__p) value_type(_IterOps<_AlgPolicy>::__iter_move(__i)); - typedef __unconstrained_reverse_iterator<_BidirectionalIterator> _RBi; - typedef __unconstrained_reverse_iterator _Rv; + typedef reverse_iterator<_BidirectionalIterator> _RBi; + typedef reverse_iterator _Rv; typedef __invert<_Compare> _Inverted; std::__half_inplace_merge<_AlgPolicy>( _Rv(__p), _Rv(__buff), _RBi(__middle), _RBi(__first), _RBi(__last), _Inverted(__comp)); diff --git a/lib/libcxx/include/__algorithm/is_heap.h b/lib/libcxx/include/__algorithm/is_heap.h index 0d2d43c2c3..c589b804a5 100644 --- a/lib/libcxx/include/__algorithm/is_heap.h +++ b/lib/libcxx/include/__algorithm/is_heap.h @@ -22,13 +22,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_heap(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { return std::__is_heap_until(__first, __last, static_cast<__comp_ref_type<_Compare> >(__comp)) == __last; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_heap(_RandomAccessIterator __first, _RandomAccessIterator __last) { return std::is_heap(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/is_heap_until.h b/lib/libcxx/include/__algorithm/is_heap_until.h index 1eae3b86b9..a174f2453c 100644 --- a/lib/libcxx/include/__algorithm/is_heap_until.h +++ b/lib/libcxx/include/__algorithm/is_heap_until.h @@ -46,13 +46,13 @@ __is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Co } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { return std::__is_heap_until(__first, __last, 
static_cast<__comp_ref_type<_Compare> >(__comp));
 }
 
 template <class _RandomAccessIterator>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator
 is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last) {
   return std::__is_heap_until(__first, __last, __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/is_partitioned.h b/lib/libcxx/include/__algorithm/is_partitioned.h
index 71feed3320..1f7c8b0b26 100644
--- a/lib/libcxx/include/__algorithm/is_partitioned.h
+++ b/lib/libcxx/include/__algorithm/is_partitioned.h
@@ -18,7 +18,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 template <class _InputIterator, class _Predicate>
-_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool
 is_partitioned(_InputIterator __first, _InputIterator __last, _Predicate __pred) {
   for (; __first != __last; ++__first)
     if (!__pred(*__first))
diff --git a/lib/libcxx/include/__algorithm/is_permutation.h b/lib/libcxx/include/__algorithm/is_permutation.h
index 4226151222..2ddfb32a21 100644
--- a/lib/libcxx/include/__algorithm/is_permutation.h
+++ b/lib/libcxx/include/__algorithm/is_permutation.h
@@ -113,7 +113,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation_impl(
 
 // 2+1 iterators, predicate. Not used by range algorithms.
 template <class _AlgPolicy, class _ForwardIterator1, class _Sentinel1, class _ForwardIterator2, class _BinaryPredicate>
-_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation(
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation(
     _ForwardIterator1 __first1, _Sentinel1 __last1, _ForwardIterator2 __first2, _BinaryPredicate&& __pred) {
   // Shorten sequences as much as possible by lopping off any equal prefix.
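
The comment above describes trimming the common prefix before `__is_permutation` falls into its quadratic counting phase, since equal leading elements cannot change the answer. A freestanding sketch of the idea, delegating the remaining suffixes to the standard algorithm:

#include <algorithm>

// Sketch of the prefix-trimming idea above (names invented): skip the
// longest equal prefix, then run the normal check on what is left.
template <class It>
bool is_permutation_sketch(It first1, It last1, It first2, It last2) {
  for (; first1 != last1 && first2 != last2; ++first1, ++first2)
    if (!(*first1 == *first2))
      break;
  return std::is_permutation(first1, last1, first2, last2);
}
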
for (; __first1 != __last1; ++__first1, (void)++__first2) { @@ -247,7 +247,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation( // 2+1 iterators, predicate template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _BinaryPredicate __pred) { static_assert(__is_callable<_BinaryPredicate, decltype(*__first1), decltype(*__first2)>::value, "The predicate has to be callable"); @@ -257,7 +257,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool i // 2+1 iterators template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { return std::is_permutation(__first1, __last1, __first2, __equal_to()); } @@ -266,7 +266,7 @@ is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIt // 2+2 iterators template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::__is_permutation<_ClassicAlgPolicy>( std::move(__first1), @@ -280,7 +280,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 // 2+2 iterators, predicate template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, diff --git a/lib/libcxx/include/__algorithm/is_sorted.h b/lib/libcxx/include/__algorithm/is_sorted.h index 1874cace88..3befb1ac9c 100644 --- a/lib/libcxx/include/__algorithm/is_sorted.h +++ b/lib/libcxx/include/__algorithm/is_sorted.h @@ -22,13 +22,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_sorted(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { return std::__is_sorted_until<__comp_ref_type<_Compare> >(__first, __last, __comp) == __last; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_sorted(_ForwardIterator __first, _ForwardIterator __last) { return std::is_sorted(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/is_sorted_until.h b/lib/libcxx/include/__algorithm/is_sorted_until.h index 7450440df2..53a49f00de 100644 --- a/lib/libcxx/include/__algorithm/is_sorted_until.h +++ b/lib/libcxx/include/__algorithm/is_sorted_until.h @@ -35,13 +35,13 @@ __is_sorted_until(_ForwardIterator __first, _ForwardIterator __last, _Compare __ } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator 
is_sorted_until(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { return std::__is_sorted_until<__comp_ref_type<_Compare> >(__first, __last, __comp); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator is_sorted_until(_ForwardIterator __first, _ForwardIterator __last) { return std::is_sorted_until(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/iterator_operations.h b/lib/libcxx/include/__algorithm/iterator_operations.h index 5cf13f0a3f..8ced989233 100644 --- a/lib/libcxx/include/__algorithm/iterator_operations.h +++ b/lib/libcxx/include/__algorithm/iterator_operations.h @@ -11,6 +11,7 @@ #include <__algorithm/iter_swap.h> #include <__algorithm/ranges_iterator_concept.h> +#include <__assert> #include <__config> #include <__iterator/advance.h> #include <__iterator/distance.h> @@ -160,6 +161,59 @@ struct _IterOps<_ClassicAlgPolicy> { _LIBCPP_HIDE_FROM_ABI static _LIBCPP_CONSTEXPR_SINCE_CXX14 void __advance_to(_Iter& __first, _Iter __last) { __first = __last; } + + // advance with sentinel, a la std::ranges::advance + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_Iter> + __advance_to(_Iter& __iter, __difference_type<_Iter> __count, const _Iter& __sentinel) { + return _IterOps::__advance_to(__iter, __count, __sentinel, typename iterator_traits<_Iter>::iterator_category()); + } + +private: + // advance with sentinel, a la std::ranges::advance -- InputIterator specialization + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_InputIter> __advance_to( + _InputIter& __iter, __difference_type<_InputIter> __count, const _InputIter& __sentinel, input_iterator_tag) { + __difference_type<_InputIter> __dist = 0; + for (; __dist < __count && __iter != __sentinel; ++__dist) + ++__iter; + return __count - __dist; + } + + // advance with sentinel, a la std::ranges::advance -- BidirectionalIterator specialization + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_BiDirIter> + __advance_to(_BiDirIter& __iter, + __difference_type<_BiDirIter> __count, + const _BiDirIter& __sentinel, + bidirectional_iterator_tag) { + __difference_type<_BiDirIter> __dist = 0; + if (__count >= 0) + for (; __dist < __count && __iter != __sentinel; ++__dist) + ++__iter; + else + for (__count = -__count; __dist < __count && __iter != __sentinel; ++__dist) + --__iter; + return __count - __dist; + } + + // advance with sentinel, a la std::ranges::advance -- RandomIterator specialization + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_RandIter> + __advance_to(_RandIter& __iter, + __difference_type<_RandIter> __count, + const _RandIter& __sentinel, + random_access_iterator_tag) { + auto __dist = _IterOps::distance(__iter, __sentinel); + _LIBCPP_ASSERT_VALID_INPUT_RANGE( + __count == 0 || (__dist < 0) == (__count < 0), "__sentinel must precede __iter when __count < 0"); + if (__count < 0) + __dist = __dist > __count ? __dist : __count; + else + __dist = __dist < __count ? 
__dist : __count; + __iter += __dist; + return __count - __dist; + } }; _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__algorithm/lexicographical_compare.h b/lib/libcxx/include/__algorithm/lexicographical_compare.h index 3efd8e24bf..edc29e269c 100644 --- a/lib/libcxx/include/__algorithm/lexicographical_compare.h +++ b/lib/libcxx/include/__algorithm/lexicographical_compare.h @@ -37,7 +37,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __lexicographical_compa } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, @@ -47,7 +47,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::lexicographical_compare(__first1, __last1, __first2, __last2, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h b/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h index 32de97d07a..a5872e90cf 100644 --- a/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h +++ b/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h @@ -17,7 +17,7 @@ #include <__config> #include <__iterator/iterator_traits.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -90,7 +90,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr auto __lexicographical_compare_three_way_slow_pa } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2, _Cmp __comp) -> decltype(__comp(*__first1, *__first2)) { static_assert(__comparison_category, @@ -110,7 +110,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compa } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::lexicographical_compare_three_way( std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::compare_three_way()); diff --git a/lib/libcxx/include/__algorithm/lower_bound.h b/lib/libcxx/include/__algorithm/lower_bound.h index 8f57f3592c..c417d84835 100644 --- a/lib/libcxx/include/__algorithm/lower_bound.h +++ b/lib/libcxx/include/__algorithm/lower_bound.h @@ -27,11 +27,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter -__lower_bound(_Iter __first, _Sent __last, const _Type& __value, _Comp& __comp, _Proj& __proj) { - auto __len = _IterOps<_AlgPolicy>::distance(__first, __last); - +template +_LIBCPP_NODISCARD 
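
Unlike `std::advance`, the new `__advance_to` overloads above stop at a sentinel and report the shortfall, i.e. the number of requested steps that could not be taken, mirroring `std::ranges::advance`. A sketch of the forward-only case (names invented); the one-sided lower bound below relies on exactly this to learn both its new position and the width of the window it just crossed:

#include <iterator>

// Advance at most `count` steps, never past `sentinel`; return how many
// of the requested steps were NOT taken (0 means the full advance happened).
template <class It>
typename std::iterator_traits<It>::difference_type
advance_to_sketch(It& it, typename std::iterator_traits<It>::difference_type count, It sentinel) {
  typename std::iterator_traits<It>::difference_type taken = 0;
  for (; taken < count && it != sentinel; ++taken)
    ++it;
  return count - taken;
}
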
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter __lower_bound_bisecting( + _Iter __first, + const _Type& __value, + typename iterator_traits<_Iter>::difference_type __len, + _Comp& __comp, + _Proj& __proj) { while (__len != 0) { auto __l2 = std::__half_positive(__len); _Iter __m = __first; @@ -46,8 +48,50 @@ __lower_bound(_Iter __first, _Sent __last, const _Type& __value, _Comp& __comp, return __first; } +// One-sided binary search, aka meta binary search, has been in the public domain for decades, and has the general +// advantage of being \Omega(1) rather than the classic algorithm's \Omega(log(n)), with the downside of executing at +// most 2*log(n) comparisons vs the classic algorithm's exact log(n). There are two scenarios in which it really shines: +// the first one is when operating over non-random-access iterators, because the classic algorithm requires knowing the +// container's size upfront, which adds \Omega(n) iterator increments to the complexity. The second one is when you're +// traversing the container in order, trying to fast-forward to the next value: in that case, the classic algorithm +// would yield \Omega(n*log(n)) comparisons and, for non-random-access iterators, \Omega(n^2) iterator increments, +// whereas the one-sided version will yield O(n) operations on both counts, with a \Omega(log(n)) bound on the number of +// comparisons. +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +__lower_bound_onesided(_ForwardIterator __first, _Sent __last, const _Type& __value, _Comp& __comp, _Proj& __proj) { + // step = 0, ensuring we can always short-circuit when distance is 1 later on + if (__first == __last || !std::__invoke(__comp, std::__invoke(__proj, *__first), __value)) + return __first; + + using _Distance = typename iterator_traits<_ForwardIterator>::difference_type; + for (_Distance __step = 1; __first != __last; __step <<= 1) { + auto __it = __first; + auto __dist = __step - _IterOps<_AlgPolicy>::__advance_to(__it, __step, __last); + // once we reach the last range where needle can be we must start + // looking inwards, bisecting that range + if (__it == __last || !std::__invoke(__comp, std::__invoke(__proj, *__it), __value)) { + // we've already checked the previous value and it was less, we can save + // one comparison by skipping bisection + if (__dist == 1) + return __it; + return std::__lower_bound_bisecting<_AlgPolicy>(__first, __value, __dist, __comp, __proj); + } + // range not found, move forward! 
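
A compact illustration of the one-sided ("meta", or galloping) binary search described in the comment above, written against a sorted random-access range for simplicity (the real code also handles forward iterators via `__advance_to`):

#include <algorithm>
#include <iterator>

// Galloping lower bound: double the step until the needle's window is
// found, then bisect inside that final window. Costs O(log d) where d is
// the distance to the answer, at the price of up to ~2*log(d) comparisons.
template <class It, class T>
It lower_bound_onesided_sketch(It first, It last, const T& value) {
  if (first == last || !(*first < value))
    return first;
  typename std::iterator_traits<It>::difference_type step = 1;
  while (first != last) {
    It it = first;
    auto len = std::min(step, last - it);
    it += len;
    if (it == last || !(*it < value))
      return std::lower_bound(first, it, value);  // bisect the final window
    first = it;   // still below the needle: keep galloping
    step <<= 1;
  }
  return first;
}
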
+ __first = __it; + } + return __first; +} + +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +__lower_bound(_ForwardIterator __first, _Sent __last, const _Type& __value, _Comp& __comp, _Proj& __proj) { + const auto __dist = _IterOps<_AlgPolicy>::distance(__first, __last); + return std::__lower_bound_bisecting<_AlgPolicy>(__first, __value, __dist, __comp, __proj); +} + template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator lower_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { static_assert(__is_callable<_Compare, decltype(*__first), const _Tp&>::value, "The comparator has to be callable"); auto __proj = std::__identity(); @@ -55,7 +99,7 @@ lower_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __valu } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator lower_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::lower_bound(__first, __last, __value, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/make_projected.h b/lib/libcxx/include/__algorithm/make_projected.h index bb7bc7e8c0..5245e523f3 100644 --- a/lib/libcxx/include/__algorithm/make_projected.h +++ b/lib/libcxx/include/__algorithm/make_projected.h @@ -36,8 +36,8 @@ struct _ProjectedPred { : __pred(__pred_arg), __proj(__proj_arg) {} template - typename __invoke_of<_Pred&, decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_Tp>())) >:: - type _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI + typename __invoke_of<_Pred&, decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_Tp>()))>::type + _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI operator()(_Tp&& __v) const { return std::__invoke(__pred, std::__invoke(__proj, std::forward<_Tp>(__v))); } @@ -45,8 +45,8 @@ struct _ProjectedPred { template typename __invoke_of<_Pred&, decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_T1>())), - decltype(std::__invoke(std::declval<_Proj&>(), - std::declval<_T2>())) >::type _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI + decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_T2>()))>::type _LIBCPP_CONSTEXPR + _LIBCPP_HIDE_FROM_ABI operator()(_T1&& __lhs, _T2&& __rhs) const { return std::__invoke( __pred, std::__invoke(__proj, std::forward<_T1>(__lhs)), std::__invoke(__proj, std::forward<_T2>(__rhs))); diff --git a/lib/libcxx/include/__algorithm/max.h b/lib/libcxx/include/__algorithm/max.h index 8171677f15..d4c99f6f36 100644 --- a/lib/libcxx/include/__algorithm/max.h +++ b/lib/libcxx/include/__algorithm/max.h @@ -25,13 +25,13 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& max(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Compare __comp) { return __comp(__a, __b) ? 
__b : __a;
 }
 
 template <class _Tp>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&
 max(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) {
   return std::max(__a, __b, __less<>());
 }
@@ -39,13 +39,13 @@ max(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b)
 #ifndef _LIBCPP_CXX03_LANG
 
 template <class _Tp, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp
 max(initializer_list<_Tp> __t, _Compare __comp) {
   return *std::__max_element<__comp_ref_type<_Compare> >(__t.begin(), __t.end(), __comp);
 }
 
 template <class _Tp>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp max(initializer_list<_Tp> __t) {
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp max(initializer_list<_Tp> __t) {
   return *std::max_element(__t.begin(), __t.end(), __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/max_element.h b/lib/libcxx/include/__algorithm/max_element.h
index f1d4f1cd09..c036726cbc 100644
--- a/lib/libcxx/include/__algorithm/max_element.h
+++ b/lib/libcxx/include/__algorithm/max_element.h
@@ -35,13 +35,13 @@ __max_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp
 }
 
 template <class _ForwardIterator, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
 max_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) {
   return std::__max_element<__comp_ref_type<_Compare> >(__first, __last, __comp);
 }
 
 template <class _ForwardIterator>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
 max_element(_ForwardIterator __first, _ForwardIterator __last) {
   return std::max_element(__first, __last, __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/min.h b/lib/libcxx/include/__algorithm/min.h
index 919508486f..1bafad8a46 100644
--- a/lib/libcxx/include/__algorithm/min.h
+++ b/lib/libcxx/include/__algorithm/min.h
@@ -25,13 +25,13 @@ _LIBCPP_PUSH_MACROS
 _LIBCPP_BEGIN_NAMESPACE_STD
 
 template <class _Tp, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&
 min(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Compare __comp) {
   return __comp(__b, __a) ? __b : __a;
 }
 
 template <class _Tp>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&
 min(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) {
   return std::min(__a, __b, __less<>());
 }
@@ -39,13 +39,13 @@ min(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b)
 #ifndef _LIBCPP_CXX03_LANG
 
 template <class _Tp, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp
 min(initializer_list<_Tp> __t, _Compare __comp) {
   return *std::__min_element<__comp_ref_type<_Compare> >(__t.begin(), __t.end(), __comp);
 }
 
 template <class _Tp>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp min(initializer_list<_Tp> __t) {
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp min(initializer_list<_Tp> __t) {
   return *std::min_element(__t.begin(), __t.end(), __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/min_element.h b/lib/libcxx/include/__algorithm/min_element.h
index c576d66560..65f3594d63 100644
--- a/lib/libcxx/include/__algorithm/min_element.h
+++ b/lib/libcxx/include/__algorithm/min_element.h
@@ -48,7 +48,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter __min_element(_Iter __
 }
 
 template <class _ForwardIterator, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
 min_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) {
   static_assert(
       __has_forward_iterator_category<_ForwardIterator>::value, "std::min_element requires a ForwardIterator");
@@ -59,7 +59,7 @@ min_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp)
 }
 
 template <class _ForwardIterator>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator
 min_element(_ForwardIterator __first, _ForwardIterator __last) {
   return std::min_element(__first, __last, __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/minmax.h b/lib/libcxx/include/__algorithm/minmax.h
index 5227b88571..9feda2b4c0 100644
--- a/lib/libcxx/include/__algorithm/minmax.h
+++ b/lib/libcxx/include/__algorithm/minmax.h
@@ -24,13 +24,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 template <class _Tp, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<const _Tp&, const _Tp&>
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<const _Tp&, const _Tp&>
 minmax(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Compare __comp) {
   return __comp(__b, __a) ? pair<const _Tp&, const _Tp&>(__b, __a) : pair<const _Tp&, const _Tp&>(__a, __b);
 }
 
 template <class _Tp>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<const _Tp&, const _Tp&>
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<const _Tp&, const _Tp&>
 minmax(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) {
   return std::minmax(__a, __b, __less<>());
 }
@@ -38,7 +38,7 @@ minmax(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __
 #ifndef _LIBCPP_CXX03_LANG
 
 template <class _Tp, class _Compare>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp>
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp>
 minmax(initializer_list<_Tp> __t, _Compare __comp) {
   static_assert(__is_callable<_Compare, _Tp, _Tp>::value, "The comparator has to be callable");
   __identity __proj;
@@ -47,7 +47,7 @@ minmax(initializer_list<_Tp> __t, _Compare __comp) {
 }
 
 template <class _Tp>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp>
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp>
 minmax(initializer_list<_Tp> __t) {
   return std::minmax(__t, __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/minmax_element.h b/lib/libcxx/include/__algorithm/minmax_element.h
index ff8cda321c..43cb23347c 100644
--- a/lib/libcxx/include/__algorithm/minmax_element.h
+++ b/lib/libcxx/include/__algorithm/minmax_element.h
@@ -79,7 +79,7 @@ __minmax_element_impl(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj)
 }
 
 template <class _ForwardIterator, class _Compare>
-_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_ForwardIterator, _ForwardIterator>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_ForwardIterator, _ForwardIterator>
 minmax_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) {
   static_assert(
       __has_forward_iterator_category<_ForwardIterator>::value, "std::minmax_element requires a ForwardIterator");
@@ -90,9 +90,8 @@ minmax_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __com
 }
 
 template <class _ForwardIterator>
-_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14
-    pair<_ForwardIterator, _ForwardIterator>
-    minmax_element(_ForwardIterator __first, _ForwardIterator __last) {
+_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_ForwardIterator, _ForwardIterator>
+minmax_element(_ForwardIterator __first, _ForwardIterator __last) {
   return std::minmax_element(__first, __last, __less<>());
 }
diff --git a/lib/libcxx/include/__algorithm/mismatch.h b/lib/libcxx/include/__algorithm/mismatch.h
index d345b6048a..632bec0240 100644
--- a/lib/libcxx/include/__algorithm/mismatch.h
+++ b/lib/libcxx/include/__algorithm/mismatch.h
@@ -11,47 +11,200 @@
 #define _LIBCPP___ALGORITHM_MISMATCH_H
 
 #include <__algorithm/comp.h>
+#include <__algorithm/min.h>
+#include <__algorithm/simd_utils.h>
+#include <__algorithm/unwrap_iter.h>
 #include <__config>
-#include <__iterator/iterator_traits.h>
+#include <__functional/identity.h>
+#include <__iterator/aliasing_iterator.h>
+#include <__type_traits/desugars_to.h>
+#include <__type_traits/invoke.h>
+#include <__type_traits/is_constant_evaluated.h>
+#include <__type_traits/is_equality_comparable.h>
+#include <__type_traits/is_integral.h>
+#include <__utility/move.h>
 #include <__utility/pair.h>
+#include <__utility/unreachable.h>
+#include <cstddef>
 
 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
 #  pragma GCC system_header
 #endif
 
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
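
The rewritten `mismatch` that follows adds a SIMD fast path (`__simd_vector`, unrolled four vectors at a time, with a trailing overlapped load). A portable sketch of the same chunk-then-rescan idea using 8-byte blocks (illustrative only; names invented for the example):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>

// Compare 8 bytes at a time; once a chunk differs, fall back to a scalar
// scan to locate the exact differing byte. Assumes l1 >= f1.
inline std::pair<const unsigned char*, const unsigned char*>
mismatch_bytes_sketch(const unsigned char* f1, const unsigned char* l1, const unsigned char* f2) {
  while (static_cast<std::size_t>(l1 - f1) >= 8) {
    std::uint64_t a, b;
    std::memcpy(&a, f1, 8);  // memcpy avoids alignment and aliasing UB
    std::memcpy(&b, f2, 8);
    if (a != b)
      break;  // differing byte is inside this chunk; find it below
    f1 += 8;
    f2 += 8;
  }
  while (f1 != l1 && *f1 == *f2) {
    ++f1;
    ++f2;
  }
  return {f1, f2};
}
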
_LIBCPP_BEGIN_NAMESPACE_STD -template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> -mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate __pred) { - for (; __first1 != __last1; ++__first1, (void)++__first2) - if (!__pred(*__first1, *__first2)) +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2> +__mismatch_loop(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + while (__first1 != __last1) { + if (!std::__invoke(__pred, std::__invoke(__proj1, *__first1), std::__invoke(__proj2, *__first2))) break; - return pair<_InputIterator1, _InputIterator2>(__first1, __first2); + ++__first1; + ++__first2; + } + return std::make_pair(std::move(__first1), std::move(__first2)); +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2> +__mismatch(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj1, __proj2); +} + +#if _LIBCPP_VECTORIZE_ALGORITHMS + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter, _Iter> +__mismatch_vectorized(_Iter __first1, _Iter __last1, _Iter __first2) { + using __value_type = __iter_value_type<_Iter>; + constexpr size_t __unroll_count = 4; + constexpr size_t __vec_size = __native_vector_size<__value_type>; + using __vec = __simd_vector<__value_type, __vec_size>; + + if (!__libcpp_is_constant_evaluated()) { + auto __orig_first1 = __first1; + auto __last2 = __first2 + (__last1 - __first1); + while (static_cast(__last1 - __first1) >= __unroll_count * __vec_size) [[__unlikely__]] { + __vec __lhs[__unroll_count]; + __vec __rhs[__unroll_count]; + + for (size_t __i = 0; __i != __unroll_count; ++__i) { + __lhs[__i] = std::__load_vector<__vec>(__first1 + __i * __vec_size); + __rhs[__i] = std::__load_vector<__vec>(__first2 + __i * __vec_size); + } + + for (size_t __i = 0; __i != __unroll_count; ++__i) { + if (auto __cmp_res = __lhs[__i] == __rhs[__i]; !std::__all_of(__cmp_res)) { + auto __offset = __i * __vec_size + std::__find_first_not_set(__cmp_res); + return {__first1 + __offset, __first2 + __offset}; + } + } + + __first1 += __unroll_count * __vec_size; + __first2 += __unroll_count * __vec_size; + } + + // check the remaining 0-3 vectors + while (static_cast(__last1 - __first1) >= __vec_size) { + if (auto __cmp_res = std::__load_vector<__vec>(__first1) == std::__load_vector<__vec>(__first2); + !std::__all_of(__cmp_res)) { + auto __offset = std::__find_first_not_set(__cmp_res); + return {__first1 + __offset, __first2 + __offset}; + } + __first1 += __vec_size; + __first2 += __vec_size; + } + + if (__last1 - __first1 == 0) + return {__first1, __first2}; + + // Check if we can load elements in front of the current pointer. 
If that's the case load a vector at + // (last - vector_size) to check the remaining elements + if (static_cast(__first1 - __orig_first1) >= __vec_size) { + __first1 = __last1 - __vec_size; + __first2 = __last2 - __vec_size; + auto __offset = + std::__find_first_not_set(std::__load_vector<__vec>(__first1) == std::__load_vector<__vec>(__first2)); + return {__first1 + __offset, __first2 + __offset}; + } // else loop over the elements individually + } + + __equal_to __pred; + __identity __proj; + return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj, __proj); +} + +template ::value && __desugars_to_v<__equal_tag, _Pred, _Tp, _Tp> && + __is_identity<_Proj1>::value && __is_identity<_Proj2>::value, + int> = 0> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*> +__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Pred&, _Proj1&, _Proj2&) { + return std::__mismatch_vectorized(__first1, __last1, __first2); +} + +template ::value && __desugars_to_v<__equal_tag, _Pred, _Tp, _Tp> && + __is_identity<_Proj1>::value && __is_identity<_Proj2>::value && + __can_map_to_integer_v<_Tp> && __libcpp_is_trivially_equality_comparable<_Tp, _Tp>::value, + int> = 0> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*> +__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + if (__libcpp_is_constant_evaluated()) { + return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj1, __proj2); + } else { + using _Iter = __aliasing_iterator<_Tp*, __get_as_integer_type_t<_Tp>>; + auto __ret = std::__mismatch_vectorized(_Iter(__first1), _Iter(__last1), _Iter(__first2)); + return {__ret.first.__base(), __ret.second.__base()}; + } +} +#endif // _LIBCPP_VECTORIZE_ALGORITHMS + +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate __pred) { + __identity __proj; + auto __res = std::__mismatch( + std::__unwrap_iter(__first1), std::__unwrap_iter(__last1), std::__unwrap_iter(__first2), __pred, __proj, __proj); + return std::make_pair(std::__rewrap_iter(__first1, __res.first), std::__rewrap_iter(__first2, __res.second)); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2) { return std::mismatch(__first1, __last1, __first2, __equal_to()); } #if _LIBCPP_STD_VER >= 14 +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2> __mismatch( + _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + while (__first1 != __last1 && __first2 != __last2) { + if (!std::__invoke(__pred, std::__invoke(__proj1, *__first1), std::__invoke(__proj2, *__first2))) + break; + ++__first1; + ++__first2; + } + return {std::move(__first1), std::move(__first2)}; +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*> +__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Tp* __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + auto __len = std::min(__last1 - __first1, __last2 - __first2); + return 
std::__mismatch(__first1, __first1 + __len, __first2, __pred, __proj1, __proj2); +} + template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2, _BinaryPredicate __pred) { - for (; __first1 != __last1 && __first2 != __last2; ++__first1, (void)++__first2) - if (!__pred(*__first1, *__first2)) - break; - return pair<_InputIterator1, _InputIterator2>(__first1, __first2); + __identity __proj; + auto __res = std::__mismatch( + std::__unwrap_iter(__first1), + std::__unwrap_iter(__last1), + std::__unwrap_iter(__first2), + std::__unwrap_iter(__last2), + __pred, + __proj, + __proj); + return {std::__rewrap_iter(__first1, __res.first), std::__rewrap_iter(__first2, __res.second)}; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::mismatch(__first1, __last1, __first2, __last2, __equal_to()); } @@ -59,4 +212,6 @@ mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __fi _LIBCPP_END_NAMESPACE_STD +_LIBCPP_POP_MACROS + #endif // _LIBCPP___ALGORITHM_MISMATCH_H diff --git a/lib/libcxx/include/__algorithm/move.h b/lib/libcxx/include/__algorithm/move.h index dba6d487ff..1716d43e2a 100644 --- a/lib/libcxx/include/__algorithm/move.h +++ b/lib/libcxx/include/__algorithm/move.h @@ -16,7 +16,7 @@ #include <__config> #include <__iterator/segmented_iterator.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -34,7 +34,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIte __move(_InIter __first, _Sent __last, _OutIter __result); template -struct __move_loop { +struct __move_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -95,9 +95,7 @@ struct __move_loop { __local_first = _Traits::__begin(++__segment_iterator); } } -}; -struct __move_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. 
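
After iterators are unwrapped to pointers, `__move_impl` above keeps an overload that lowers element-wise move assignment to a bulk memory copy for types where that is valid. A simplified sketch of that dispatch, using `is_trivially_copyable` as an approximation of the library's actual trait:

#include <cstddef>
#include <cstring>
#include <type_traits>
#include <utility>

// Sketch: for trivially copyable elements a single memmove (which handles
// overlapping ranges) is equivalent to moving each element in turn.
template <class T>
std::pair<T*, T*> move_range_sketch(T* first, T* last, T* result) {
  if constexpr (std::is_trivially_copyable_v<T>) {
    const std::size_t n = static_cast<std::size_t>(last - first);
    if (n > 0)
      std::memmove(result, first, n * sizeof(T));
    return {last, result + n};
  } else {
    for (; first != last; ++first, (void)++result)
      *result = std::move(*first);
    return {first, result};  // first == last here
  }
}
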
template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -109,7 +107,7 @@ struct __move_trivial { template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> __move(_InIter __first, _Sent __last, _OutIter __result) { - return std::__dispatch_copy_or_move<_AlgPolicy, __move_loop<_AlgPolicy>, __move_trivial>( + return std::__copy_move_unwrap_iters<__move_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/move_backward.h b/lib/libcxx/include/__algorithm/move_backward.h index aeedf4241d..4beb7bdbaa 100644 --- a/lib/libcxx/include/__algorithm/move_backward.h +++ b/lib/libcxx/include/__algorithm/move_backward.h @@ -15,7 +15,7 @@ #include <__config> #include <__iterator/segmented_iterator.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -33,7 +33,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_BidirectionalIterator1 __move_backward(_BidirectionalIterator1 __first, _Sentinel __last, _BidirectionalIterator2 __result); template -struct __move_backward_loop { +struct __move_backward_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -104,9 +104,7 @@ struct __move_backward_loop { __local_last = _Traits::__end(--__segment_iterator); } } -}; -struct __move_backward_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -122,7 +120,7 @@ __move_backward(_BidirectionalIterator1 __first, _Sentinel __last, _Bidirectiona std::is_copy_constructible<_BidirectionalIterator1>::value, "Iterators must be copy constructible."); - return std::__dispatch_copy_or_move<_AlgPolicy, __move_backward_loop<_AlgPolicy>, __move_backward_trivial>( + return std::__copy_move_unwrap_iters<__move_backward_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/none_of.h b/lib/libcxx/include/__algorithm/none_of.h index ce59187a3a..50841ba17c 100644 --- a/lib/libcxx/include/__algorithm/none_of.h +++ b/lib/libcxx/include/__algorithm/none_of.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool none_of(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/partial_sort.h b/lib/libcxx/include/__algorithm/partial_sort.h index 85a8fdc77a..7f8d0c4914 100644 --- a/lib/libcxx/include/__algorithm/partial_sort.h +++ b/lib/libcxx/include/__algorithm/partial_sort.h @@ -18,8 +18,8 @@ #include <__config> #include <__debug_utils/randomize_range.h> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/pop_heap.h 
b/lib/libcxx/include/__algorithm/pop_heap.h index 798a1d0993..6d23830097 100644 --- a/lib/libcxx/include/__algorithm/pop_heap.h +++ b/lib/libcxx/include/__algorithm/pop_heap.h @@ -17,8 +17,8 @@ #include <__assert> #include <__config> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/pstl.h b/lib/libcxx/include/__algorithm/pstl.h new file mode 100644 index 0000000000..0bb052b3f9 --- /dev/null +++ b/lib/libcxx/include/__algorithm/pstl.h @@ -0,0 +1,663 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_PSTL_H +#define _LIBCPP___ALGORITHM_PSTL_H + +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 + +# include <__functional/operations.h> +# include <__iterator/cpp17_iterator_concepts.h> +# include <__iterator/iterator_traits.h> +# include <__pstl/backend.h> +# include <__pstl/dispatch.h> +# include <__pstl/handle_exception.h> +# include <__type_traits/enable_if.h> +# include <__type_traits/is_execution_policy.h> +# include <__type_traits/remove_cvref.h> +# include <__utility/forward.h> +# include <__utility/move.h> + +_LIBCPP_BEGIN_NAMESPACE_STD + +template , + enable_if_t, int> = 0> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +any_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "any_of requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__any_of, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +all_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "all_of requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__all_of, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +none_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "none_of requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__none_of, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); 
+} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +copy(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "copy(first, last, result) requires [first, last) to be ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardOutIterator, "copy(first, last, result) requires result to be a ForwardIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "copy(first, last, result) requires result to be an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__copy, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +copy_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "copy_n(first, n, result) requires first to be a ForwardIterator"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardOutIterator, "copy_n(first, n, result) requires result to be a ForwardIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "copy_n(first, n, result) requires result to be an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__copy_n, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__n), std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> +count_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "count_if(first, last, pred) requires [first, last) to be ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__count_if, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> +count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "count(first, last, val) requires [first, last) to be ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__count, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal_3leg, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( 
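// Every new frontend in pstl.h follows the same two-step shape: pick an
// _Implementation with __pstl::__dispatch (current backend configuration plus
// raw policy), then run it through __pstl::__handle_exception, which converts
// a backend's disengaged-optional allocation failure into bad_alloc. A toy
// version of that contract (backend_any_of/handle_exception are hypothetical
// names; the real wrapper additionally guarantees that exceptions escaping
// user callables terminate the program, which this sketch does not implement):

#include <new>       // std::bad_alloc
#include <optional>
#include <utility>

// Backends signal allocation failure by returning a disengaged optional
// instead of throwing.
template <class It, class Pred>
std::optional<bool> backend_any_of(It first, It last, Pred pred) {
  for (; first != last; ++first)
    if (pred(*first))
      return true;
  return false;
}

// Frontend wrapper: run the implementation, surface failure as bad_alloc.
template <class F, class... Args>
auto handle_exception(F f, Args&&... args) {
  auto res = f(std::forward<Args>(args)...);
  if (!res)
    throw std::bad_alloc();
  return *std::move(res);
}

// Usage:
//   bool r = handle_exception(
//       [&] { return backend_any_of(v.begin(), v.end(), pred); });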
+ std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal_3leg, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + equal_to{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2, + _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + equal_to{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__fill, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +fill_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill_n requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__fill_n, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__n), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardIterator +find_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + 
_LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__find_if, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardIterator +find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if_not requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__find_if_not, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardIterator +find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__find, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +for_each(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Function __func) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__for_each, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__func)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __size, _Function __func) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each_n requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__for_each_n, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__size), std::move(__func)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +generate(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Generator __gen) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__generate, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__gen)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +generate_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _Generator __gen) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate_n requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__generate_n, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), 
std::move(__first), std::move(__n), std::move(__gen)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool +is_partitioned(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "is_partitioned requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__is_partitioned, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +merge(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2, + _ForwardOutIterator __result, + _Comp __comp) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first1), "merge requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first2), "merge requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__merge, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__result), + std::move(__comp)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +merge(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2, + _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first1), "merge requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first2), "merge requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__merge, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__result), + less{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +move(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "move requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "move requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(std::move(*__first)), "move requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__move, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__result)); +} + +template , + enable_if_t, 
int> = 0> +_LIBCPP_HIDE_FROM_ABI void +replace_if(_ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _Pred __pred, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_if requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__replace_if, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred), __new_value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +replace(_ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + const _Tp& __old_value, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__replace, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __old_value, __new_value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void replace_copy_if( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _ForwardOutIterator __result, + _Pred __pred, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy_if requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy_if requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "replace_copy_if requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__replace_copy_if, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__last), + std::move(__result), + std::move(__pred), + __new_value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void replace_copy( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _ForwardOutIterator __result, + const _Tp& __old_value, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "replace_copy requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__replace_copy, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__last), + std::move(__result), + __old_value, + __new_value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator rotate_copy( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __middle, + _ForwardIterator __last, + _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "rotate_copy requires ForwardIterators"); + 
_LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "rotate_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "rotate_copy requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__rotate_copy, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__middle), + std::move(__last), + std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__comp)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +stable_sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "stable_sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__stable_sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__comp)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +stable_sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "stable_sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__stable_sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _ForwardOutIterator __result, + _UnaryOperation __op) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "transform requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(__op(*__first)), "transform requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__transform, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__last), + 
std::move(__result), + std::move(__op)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( + _ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardOutIterator __result, + _BinaryOperation __op) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "transform requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "transform requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(__op(*__first1, *__first2)), "transform requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__transform_binary, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__result), + std::move(__op)); +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_PSTL_H diff --git a/lib/libcxx/include/__algorithm/pstl_any_all_none_of.h b/lib/libcxx/include/__algorithm/pstl_any_all_none_of.h deleted file mode 100644 index 4b1e0e61b5..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_any_all_none_of.h +++ /dev/null @@ -1,152 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H -#define _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H - -#include <__algorithm/pstl_find.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_any_of(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional __any_of( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_any_of, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional { - auto __res = std::__find_if(__policy, __g_first, __g_last, __g_pred); - if (!__res) - return nullopt; - return *__res != __g_last; - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -any_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto 
__res = std::__any_of(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_all_of(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__all_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_all_of, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional { - auto __res = std::__any_of(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __value) { - return !__g_pred(__value); - }); - if (!__res) - return nullopt; - return !*__res; - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -all_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__all_of(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_none_of(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__none_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_none_of, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional { - auto __res = std::__any_of(__policy, __g_first, __g_last, __g_pred); - if (!__res) - return nullopt; - return !*__res; - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -none_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__none_of(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H diff --git a/lib/libcxx/include/__algorithm/pstl_backend.h b/lib/libcxx/include/__algorithm/pstl_backend.h deleted file mode 100644 index 3af03ce2fb..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backend.h +++ /dev/null @@ -1,232 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
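// The deleted pstl_any_all_none_of.h derived the whole family from one
// primitive, exactly as its lambdas show: any_of(p) is find_if(p) != last,
// all_of(p) is !any_of(not p), and none_of(p) is !any_of(p). The same
// reductions, stripped of the policy/optional plumbing (my_* names are
// hypothetical):

#include <algorithm>

template <class It, class Pred>
bool my_any_of(It first, It last, Pred pred) {
  return std::find_if(first, last, pred) != last;
}

template <class It, class Pred>
bool my_all_of(It first, It last, Pred pred) {
  return !my_any_of(first, last, [&](const auto& x) { return !pred(x); });
}

template <class It, class Pred>
bool my_none_of(It first, It last, Pred pred) {
  return !my_any_of(first, last, pred);
}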
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKEND_H -#define _LIBCPP___ALGORITHM_PSTL_BACKEND_H - -#include <__algorithm/pstl_backends/cpu_backend.h> -#include <__config> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -/* -TODO: Documentation of how backends work - -A PSTL parallel backend is a tag type to which the following functions are associated, at minimum: - - template - optional<__empty> __pstl_for_each(_Backend, _ExecutionPolicy&&, _Iterator __first, _Iterator __last, _Func __f); - - template - optional<_Iterator> __pstl_find_if(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> - __pstl_stable_sort(_Backend, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp); - - template - optional<_ForwardOutIterator> __pstl_merge(_Backend, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp); - - template - optional<_OutIterator> - __pstl_transform(_Backend, _InIterator __first, _InIterator __last, _OutIterator __result, _UnaryOperation __op); - - template - optional<_OutIterator> __pstl_transform(_InIterator1 __first1, - _InIterator2 __first2, - _InIterator1 __last1, - _OutIterator __result, - _BinaryOperation __op); - - template - optional<_Tp> __pstl_transform_reduce(_Backend, - _Iterator1 __first1, - _Iterator1 __last1, - _Iterator2 __first2, - _Iterator2 __last2, - _Tp __init, - _BinaryOperation1 __reduce, - _BinaryOperation2 __transform); - - template - optional<_Tp> __pstl_transform_reduce(_Backend, - _Iterator __first, - _Iterator __last, - _Tp __init, - _BinaryOperation __reduce, - _UnaryOperation __transform); - -// TODO: Complete this list - -The following functions are optional but can be provided. If provided, they are used by the corresponding -algorithms, otherwise they are implemented in terms of other algorithms. 
If none of the optional algorithms are -implemented, all the algorithms will eventually forward to the basis algorithms listed above: - - template - optional<__empty> __pstl_for_each_n(_Backend, _Iterator __first, _Size __n, _Func __f); - - template - optional __pstl_any_of(_Backend, _Iterator __first, _iterator __last, _Predicate __pred); - - template - optional __pstl_all_of(_Backend, _Iterator __first, _iterator __last, _Predicate __pred); - - template - optional __pstl_none_of(_Backend, _Iterator __first, _iterator __last, _Predicate __pred); - - template - optional<_Iterator> __pstl_find(_Backend, _Iterator __first, _Iterator __last, const _Tp& __value); - - template - optional<_Iterator> __pstl_find_if_not(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> __pstl_fill(_Backend, _Iterator __first, _Iterator __last, const _Tp& __value); - - template - optional<__empty> __pstl_fill_n(_Backend, _Iterator __first, _SizeT __n, const _Tp& __value); - - template - optional<__empty> __pstl_generate(_Backend, _Iterator __first, _Iterator __last, _Generator __gen); - - template - optional<__empty> __pstl_is_partitioned(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> __pstl_generator_n(_Backend, _Iterator __first, _Size __n, _Generator __gen); - - template - optional<_OutIterator> __pstl_merge(_Backend, - _Iterator1 __first1, - _Iterator1 __last1, - _Iterator2 __first2, - _Iterator2 __last2, - _OutIterator __result, - _Comp __comp); - - template - optional<_OutIterator> __pstl_move(_Backend, _Iterator __first, _Iterator __last, _OutIterator __result); - - template - optional<_Tp> __pstl_reduce(_Backend, _Iterator __first, _Iterator __last, _Tp __init, _BinaryOperation __op); - - temlate - optional<__iter_value_type<_Iterator>> __pstl_reduce(_Backend, _Iterator __first, _Iterator __last); - - template - optional<__iter_diff_t<_Iterator>> __pstl_count(_Backend, _Iterator __first, _Iterator __last, const _Tp& __value); - - template - optional<__iter_diff_t<_Iterator>> __pstl_count_if(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> - __pstl_replace(_Backend, _Iterator __first, _Iterator __last, const _Tp& __old_value, const _Tp& __new_value); - - template - optional<__empty> - __pstl_replace_if(_Backend, _Iterator __first, _Iterator __last, _Pred __pred, const _Tp& __new_value); - - template - optional<__empty> __pstl_replace_copy(_Backend, - _Iterator __first, - _Iterator __last, - _OutIterator __result, - const _Tp& __old_value, - const _Tp& __new_value); - - template - optional<__empty> __pstl_replace_copy_if(_Backend, - _Iterator __first, - _Iterator __last, - _OutIterator __result, - _Pred __pred, - const _Tp& __new_value); - - template - optional<_Iterator> __pstl_rotate_copy( - _Backend, _Iterator __first, _Iterator __middle, _Iterator __last, _OutIterator __result); - - template - optional<__empty> __pstl_sort(_Backend, _Iterator __first, _Iterator __last, _Comp __comp); - - template - optional __pstl_equal(_Backend, _Iterator1 first1, _Iterator1 last1, _Iterator2 first2, _Comp __comp); - -// TODO: Complete this list - -Exception handling -================== - -PSTL backends are expected to report errors (i.e. failure to allocate) by returning a disengaged `optional` from their -implementation. 
Exceptions shouldn't be used to report an internal failure-to-allocate, since all exceptions are turned -into a program termination at the front-end level. When a backend returns a disengaged `optional` to the frontend, the -frontend will turn that into a call to `std::__throw_bad_alloc();` to report the internal failure to the user. -*/ - -template -struct __select_backend; - -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; - -# if _LIBCPP_STD_VER >= 20 -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; -# endif - -# if defined(_LIBCPP_PSTL_CPU_BACKEND_SERIAL) || defined(_LIBCPP_PSTL_CPU_BACKEND_THREAD) || \ - defined(_LIBCPP_PSTL_CPU_BACKEND_LIBDISPATCH) -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; - -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; - -# else - -// ...New vendors can add parallel backends here... - -# error "Invalid choice of a PSTL parallel backend" -# endif - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKEND_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h deleted file mode 100644 index 6980ded189..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h +++ /dev/null @@ -1,68 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_H - -#include <__config> - -/* - - // _Functor takes a subrange for [__first, __last) that should be executed in serial - template - optional<__empty> __parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Functor __func); - - template - optional<_Tp> - __parallel_transform_reduce(_Iterator __first, _Iterator __last, _UnaryOp, _Tp __init, _BinaryOp, _Reduction); - - // Cancel the execution of other jobs - they aren't needed anymore - void __cancel_execution(); - - template - optional __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __outit, - _Compare __comp, - _LeafMerge __leaf_merge); - - template - void __parallel_stable_sort(_RandomAccessIterator __first, - _RandomAccessIterator __last, - _Comp __comp, - _LeafSort __leaf_sort); - - TODO: Document the parallel backend - -Exception handling -================== - -CPU backends are expected to report errors (i.e. failure to allocate) by returning a disengaged `optional` from their -implementation. Exceptions shouldn't be used to report an internal failure-to-allocate, since all exceptions are turned -into a program termination at the front-end level. When a backend returns a disengaged `optional` to the frontend, the -frontend will turn that into a call to `std::__throw_bad_alloc();` to report the internal failure to the user. 
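// The deleted __select_backend trait was the dispatch story of the old
// design: a compile-time map from execution policy type to a backend tag,
// one specialization per policy, with an #error when no CPU backend macro is
// defined. Reduced to its skeleton (simplified hypothetical policy and tag
// types; the real trait is keyed off the standard policy types and the
// _LIBCPP_PSTL_CPU_BACKEND_* configuration macros):

struct sequenced_policy {};
struct parallel_policy {};
struct cpu_backend_tag {};

// Primary template left undefined: an unknown policy fails to compile.
template <class Policy>
struct select_backend;

template <>
struct select_backend<sequenced_policy> { using type = cpu_backend_tag; };

template <>
struct select_backend<parallel_policy> { using type = cpu_backend_tag; };

// An algorithm then dispatches on typename select_backend<Policy>::type, so
// a vendor backend is one new tag plus one specialization per policy.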
-*/ - -#include <__algorithm/pstl_backends/cpu_backends/any_of.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__algorithm/pstl_backends/cpu_backends/fill.h> -#include <__algorithm/pstl_backends/cpu_backends/find_if.h> -#include <__algorithm/pstl_backends/cpu_backends/for_each.h> -#include <__algorithm/pstl_backends/cpu_backends/merge.h> -#include <__algorithm/pstl_backends/cpu_backends/stable_sort.h> -#include <__algorithm/pstl_backends/cpu_backends/transform.h> -#include <__algorithm/pstl_backends/cpu_backends/transform_reduce.h> - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h deleted file mode 100644 index ea2210a4a7..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h +++ /dev/null @@ -1,41 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_BACKEND_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_BACKEND_H - -#include <__config> -#include - -#if defined(_LIBCPP_PSTL_CPU_BACKEND_SERIAL) -# include <__algorithm/pstl_backends/cpu_backends/serial.h> -#elif defined(_LIBCPP_PSTL_CPU_BACKEND_THREAD) -# include <__algorithm/pstl_backends/cpu_backends/thread.h> -#elif defined(_LIBCPP_PSTL_CPU_BACKEND_LIBDISPATCH) -# include <__algorithm/pstl_backends/cpu_backends/libdispatch.h> -#else -# error "Invalid CPU backend choice" -#endif - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -struct __cpu_backend_tag {}; - -inline constexpr size_t __lane_size = 64; - -_LIBCPP_END_NAMESPACE_STD - -#endif // _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_BACKEND_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h deleted file mode 100644 index 64babe9fd2..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h +++ /dev/null @@ -1,62 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_FILL_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_FILL_H - -#include <__algorithm/fill.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__type_traits/is_execution_policy.h> -#include <__utility/empty.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI _Index __simd_fill_n(_Index __first, _DifferenceType __n, const _Tp& __value) noexcept { - _PSTL_USE_NONTEMPORAL_STORES_IF_ALLOWED - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __first[__i] = __value; - return __first + __n; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__pstl_fill(__cpu_backend_tag, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return __par_backend::__parallel_for( - __first, __last, [&__value](_ForwardIterator __brick_first, _ForwardIterator __brick_last) { - [[maybe_unused]] auto __res = std::__pstl_fill<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, __brick_first, __brick_last, __value); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - std::__simd_fill_n(__first, __last - __first, __value); - return __empty{}; - } else { - std::fill(__first, __last, __value); - return __empty{}; - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_FILL_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h deleted file mode 100644 index 81fd4526b8..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h +++ /dev/null @@ -1,62 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
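// The deleted __pstl_fill shows the three-way split most of the old CPU
// backend used: parallel policy + random access => chunked __parallel_for,
// unsequenced => a simd index loop, anything else => the plain serial
// algorithm. A compile-time sketch of that split (hypothetical tag types;
// the real code dispatches on execution-policy traits and re-enters the
// seq/unseq implementation inside each parallel chunk):

#include <algorithm>
#include <type_traits>

struct seq_tag {};
struct unseq_tag {};
struct par_tag {};

template <class Policy, class It, class T>
void my_fill(Policy, It first, It last, const T& value) {
  if constexpr (std::is_same_v<Policy, par_tag>) {
    // A real backend would hand sub-ranges of [first, last) to workers
    // here; each worker then runs one of the branches below.
    std::fill(first, last, value);
  } else if constexpr (std::is_same_v<Policy, unseq_tag>) {
    // Index-based loop with no cross-iteration dependence: the shape the
    // deleted __simd_fill_n hands to the vectorizer.
    const auto n = last - first;
    for (auto i = decltype(n){0}; i < n; ++i)
      first[i] = value;
  } else {
    std::fill(first, last, value);
  }
}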
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKNEDS_FOR_EACH_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKNEDS_FOR_EACH_H - -#include <__algorithm/for_each.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__type_traits/is_execution_policy.h> -#include <__utility/empty.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI _Iterator __simd_walk(_Iterator __first, _DifferenceType __n, _Function __f) noexcept { - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __f(__first[__i]); - - return __first + __n; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__pstl_for_each(__cpu_backend_tag, _ForwardIterator __first, _ForwardIterator __last, _Functor __func) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return std::__par_backend::__parallel_for( - __first, __last, [__func](_ForwardIterator __brick_first, _ForwardIterator __brick_last) { - [[maybe_unused]] auto __res = std::__pstl_for_each<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, __brick_first, __brick_last, __func); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - std::__simd_walk(__first, __last - __first, __func); - return __empty{}; - } else { - std::for_each(__first, __last, __func); - return __empty{}; - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKNEDS_FOR_EACH_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h deleted file mode 100644 index e885e7f225..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h +++ /dev/null @@ -1,347 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
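// __simd_walk above is the unsequenced-policy kernel behind for_each:
// _PSTL_PRAGMA_SIMD expands to a vectorization pragma in front of a plain
// counted loop. An assumed-equivalent stand-in using the OpenMP simd pragma
// (ignored, at worst with a warning, by compilers built without OpenMP simd
// support):

#include <cstddef>

template <class T, class F>
T* simd_walk(T* first, std::ptrdiff_t n, F f) {
#pragma omp simd
  for (std::ptrdiff_t i = 0; i < n; ++i)
    f(first[i]);
  return first + n;
}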
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_LIBDISPATCH_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_LIBDISPATCH_H - -#include <__algorithm/inplace_merge.h> -#include <__algorithm/lower_bound.h> -#include <__algorithm/max.h> -#include <__algorithm/merge.h> -#include <__algorithm/upper_bound.h> -#include <__atomic/atomic.h> -#include <__config> -#include <__exception/terminate.h> -#include <__iterator/iterator_traits.h> -#include <__iterator/move_iterator.h> -#include <__memory/allocator.h> -#include <__memory/construct_at.h> -#include <__memory/unique_ptr.h> -#include <__numeric/reduce.h> -#include <__utility/empty.h> -#include <__utility/exception_guard.h> -#include <__utility/move.h> -#include <__utility/pair.h> -#include -#include -#include - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -namespace __par_backend { -inline namespace __libdispatch { - -// ::dispatch_apply is marked as __attribute__((nothrow)) because it doesn't let exceptions propagate, and neither do -// we. -// TODO: Do we want to add [[_Clang::__callback__(__func, __context, __)]]? -_LIBCPP_EXPORTED_FROM_ABI void -__dispatch_apply(size_t __chunk_count, void* __context, void (*__func)(void* __context, size_t __chunk)) noexcept; - -template -_LIBCPP_HIDE_FROM_ABI void __dispatch_apply(size_t __chunk_count, _Func __func) noexcept { - __libdispatch::__dispatch_apply(__chunk_count, &__func, [](void* __context, size_t __chunk) { - (*static_cast<_Func*>(__context))(__chunk); - }); -} - -struct __chunk_partitions { - ptrdiff_t __chunk_count_; // includes the first chunk - ptrdiff_t __chunk_size_; - ptrdiff_t __first_chunk_size_; -}; - -[[__gnu__::__const__]] _LIBCPP_EXPORTED_FROM_ABI __chunk_partitions __partition_chunks(ptrdiff_t __size) noexcept; - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__dispatch_parallel_for(__chunk_partitions __partitions, _RandomAccessIterator __first, _Functor __func) { - // Perform the chunked execution. - __libdispatch::__dispatch_apply(__partitions.__chunk_count_, [&](size_t __chunk) { - auto __this_chunk_size = __chunk == 0 ? __partitions.__first_chunk_size_ : __partitions.__chunk_size_; - auto __index = - __chunk == 0 - ? 
0 - : (__chunk * __partitions.__chunk_size_) + (__partitions.__first_chunk_size_ - __partitions.__chunk_size_); - __func(__first + __index, __first + __index + __this_chunk_size); - }); - - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Functor __func) { - return __libdispatch::__dispatch_parallel_for( - __libdispatch::__partition_chunks(__last - __first), std::move(__first), std::move(__func)); -} - -template -struct __merge_range { - __merge_range(_RandomAccessIterator1 __mid1, _RandomAccessIterator2 __mid2, _RandomAccessIteratorOut __result) - : __mid1_(__mid1), __mid2_(__mid2), __result_(__result) {} - - _RandomAccessIterator1 __mid1_; - _RandomAccessIterator2 __mid2_; - _RandomAccessIteratorOut __result_; -}; - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __result, - _Compare __comp, - _LeafMerge __leaf_merge) noexcept { - __chunk_partitions __partitions = - __libdispatch::__partition_chunks(std::max(__last1 - __first1, __last2 - __first2)); - - if (__partitions.__chunk_count_ == 0) - return __empty{}; - - if (__partitions.__chunk_count_ == 1) { - __leaf_merge(__first1, __last1, __first2, __last2, __result, __comp); - return __empty{}; - } - - using __merge_range_t = __merge_range<_RandomAccessIterator1, _RandomAccessIterator2, _RandomAccessIterator3>; - auto const __n_ranges = __partitions.__chunk_count_ + 1; - - // TODO: use __uninitialized_buffer - auto __destroy = [=](__merge_range_t* __ptr) { - std::destroy_n(__ptr, __n_ranges); - std::allocator<__merge_range_t>().deallocate(__ptr, __n_ranges); - }; - - unique_ptr<__merge_range_t[], decltype(__destroy)> __ranges( - [&]() -> __merge_range_t* { -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS - try { -# endif - return std::allocator<__merge_range_t>().allocate(__n_ranges); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS - } catch (const std::bad_alloc&) { - return nullptr; - } -# endif - }(), - __destroy); - - if (!__ranges) - return nullopt; - - // TODO: Improve the case where the smaller range is merged into just a few (or even one) chunks of the larger case - __merge_range_t* __r = __ranges.get(); - std::__construct_at(__r++, __first1, __first2, __result); - - bool __iterate_first_range = __last1 - __first1 > __last2 - __first2; - - auto __compute_chunk = [&](size_t __chunk_size) -> __merge_range_t { - auto [__mid1, __mid2] = [&] { - if (__iterate_first_range) { - auto __m1 = __first1 + __chunk_size; - auto __m2 = std::lower_bound(__first2, __last2, __m1[-1], __comp); - return std::make_pair(__m1, __m2); - } else { - auto __m2 = __first2 + __chunk_size; - auto __m1 = std::lower_bound(__first1, __last1, __m2[-1], __comp); - return std::make_pair(__m1, __m2); - } - }(); - - __result += (__mid1 - __first1) + (__mid2 - __first2); - __first1 = __mid1; - __first2 = __mid2; - return {std::move(__mid1), std::move(__mid2), __result}; - }; - - // handle first chunk - std::__construct_at(__r++, __compute_chunk(__partitions.__first_chunk_size_)); - - // handle 2 -> N - 1 chunks - for (ptrdiff_t __i = 0; __i != __partitions.__chunk_count_ - 2; ++__i) - std::__construct_at(__r++, __compute_chunk(__partitions.__chunk_size_)); - - // handle last chunk - std::__construct_at(__r, __last1, __last2, __result); - - __libdispatch::__dispatch_apply(__partitions.__chunk_count_, [&](size_t 
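// The chunk arithmetic in __dispatch_parallel_for above is easier to see
// restated: the first chunk may have its own size, every later chunk is
// uniform, and chunk k (k > 0) starts at
//   k * chunk_size + (first_chunk_size - chunk_size)
// which equals first_chunk_size + (k - 1) * chunk_size. A checkable sketch
// (names are hypothetical, mirroring __chunk_partitions):

#include <cassert>
#include <cstddef>

struct chunk_partitions {
  std::ptrdiff_t chunk_count; // includes the first chunk
  std::ptrdiff_t chunk_size;
  std::ptrdiff_t first_chunk_size;
};

inline std::ptrdiff_t chunk_offset(const chunk_partitions& p, std::ptrdiff_t k) {
  return k == 0 ? 0 : k * p.chunk_size + (p.first_chunk_size - p.chunk_size);
}

int main() {
  chunk_partitions p{3, 3, 4}; // 10 elements as chunks of sizes 4, 3, 3
  assert(chunk_offset(p, 0) == 0);
  assert(chunk_offset(p, 1) == 4);
  assert(chunk_offset(p, 2) == 7);
}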
__index) { - auto __first_iters = __ranges[__index]; - auto __last_iters = __ranges[__index + 1]; - __leaf_merge( - __first_iters.__mid1_, - __last_iters.__mid1_, - __first_iters.__mid2_, - __last_iters.__mid2_, - __first_iters.__result_, - __comp); - }); - - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Value> __parallel_transform_reduce( - _RandomAccessIterator __first, - _RandomAccessIterator __last, - _Transform __transform, - _Value __init, - _Combiner __combiner, - _Reduction __reduction) { - if (__first == __last) - return __init; - - auto __partitions = __libdispatch::__partition_chunks(__last - __first); - - auto __destroy = [__count = __partitions.__chunk_count_](_Value* __ptr) { - std::destroy_n(__ptr, __count); - std::allocator<_Value>().deallocate(__ptr, __count); - }; - - // TODO: use __uninitialized_buffer - // TODO: allocate one element per worker instead of one element per chunk - unique_ptr<_Value[], decltype(__destroy)> __values( - std::allocator<_Value>().allocate(__partitions.__chunk_count_), __destroy); - - // __dispatch_apply is noexcept - __libdispatch::__dispatch_apply(__partitions.__chunk_count_, [&](size_t __chunk) { - auto __this_chunk_size = __chunk == 0 ? __partitions.__first_chunk_size_ : __partitions.__chunk_size_; - auto __index = - __chunk == 0 - ? 0 - : (__chunk * __partitions.__chunk_size_) + (__partitions.__first_chunk_size_ - __partitions.__chunk_size_); - if (__this_chunk_size != 1) { - std::__construct_at( - __values.get() + __chunk, - __reduction(__first + __index + 2, - __first + __index + __this_chunk_size, - __combiner(__transform(__first + __index), __transform(__first + __index + 1)))); - } else { - std::__construct_at(__values.get() + __chunk, __transform(__first + __index)); - } - }); - - return std::reduce( - std::make_move_iterator(__values.get()), - std::make_move_iterator(__values.get() + __partitions.__chunk_count_), - std::move(__init), - __combiner); -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_stable_sort( - _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp, _LeafSort __leaf_sort) { - const auto __size = __last - __first; - auto __partitions = __libdispatch::__partition_chunks(__size); - - if (__partitions.__chunk_count_ == 0) - return __empty{}; - - if (__partitions.__chunk_count_ == 1) { - __leaf_sort(__first, __last, __comp); - return __empty{}; - } - - using _Value = __iter_value_type<_RandomAccessIterator>; - - auto __destroy = [__size](_Value* __ptr) { - std::destroy_n(__ptr, __size); - std::allocator<_Value>().deallocate(__ptr, __size); - }; - - // TODO: use __uninitialized_buffer - unique_ptr<_Value[], decltype(__destroy)> __values(std::allocator<_Value>().allocate(__size), __destroy); - - // Initialize all elements to a moved-from state - // TODO: Don't do this - this can be done in the first merge - see https://llvm.org/PR63928 - std::__construct_at(__values.get(), std::move(*__first)); - for (__iter_diff_t<_RandomAccessIterator> __i = 1; __i != __size; ++__i) { - std::__construct_at(__values.get() + __i, std::move(__values.get()[__i - 1])); - } - *__first = std::move(__values.get()[__size - 1]); - - __libdispatch::__dispatch_parallel_for( - __partitions, - __first, - [&__leaf_sort, &__comp](_RandomAccessIterator __chunk_first, _RandomAccessIterator __chunk_last) { - __leaf_sort(std::move(__chunk_first), std::move(__chunk_last), __comp); - }); - - bool __objects_are_in_buffer = false; - do { - const auto __old_chunk_size = __partitions.__chunk_size_; - if 
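// __parallel_transform_reduce above computes one partial value per chunk
// (seeding each chunk's accumulator from its first one or two transformed
// elements) and then reduces the partials serially with std::reduce. The
// same data flow without threads or uninitialized storage (hypothetical
// helper; the fixed chunking is purely for illustration):

#include <algorithm>
#include <numeric>
#include <vector>

template <class It, class T, class Transform, class Combine>
T chunked_transform_reduce(It first, It last, T init, Transform t, Combine c) {
  const auto n = last - first;
  const auto chunk = n < 4 ? n : n / 4;
  std::vector<T> partials; // the deleted code fills these in parallel
  for (auto i = decltype(n){0}; i < n;) {
    const auto len = std::min(chunk, n - i);
    T acc = t(first[i]); // seed from the chunk's first element
    for (auto j = i + 1; j < i + len; ++j)
      acc = c(acc, t(first[j]));
    partials.push_back(acc);
    i += len;
  }
  return std::accumulate(partials.begin(), partials.end(), init, c);
}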
(__partitions.__chunk_count_ % 2 == 1) { - auto __inplace_merge_chunks = [&__comp, &__partitions](auto __first_chunk_begin) { - std::inplace_merge( - __first_chunk_begin, - __first_chunk_begin + __partitions.__first_chunk_size_, - __first_chunk_begin + __partitions.__first_chunk_size_ + __partitions.__chunk_size_, - __comp); - }; - if (__objects_are_in_buffer) - __inplace_merge_chunks(__values.get()); - else - __inplace_merge_chunks(__first); - __partitions.__first_chunk_size_ += 2 * __partitions.__chunk_size_; - } else { - __partitions.__first_chunk_size_ += __partitions.__chunk_size_; - } - - __partitions.__chunk_size_ *= 2; - __partitions.__chunk_count_ /= 2; - - auto __merge_chunks = [__partitions, __old_chunk_size, &__comp](auto __from_first, auto __to_first) { - __libdispatch::__dispatch_parallel_for( - __partitions, - __from_first, - [__old_chunk_size, &__from_first, &__to_first, &__comp](auto __chunk_first, auto __chunk_last) { - std::merge(std::make_move_iterator(__chunk_first), - std::make_move_iterator(__chunk_last - __old_chunk_size), - std::make_move_iterator(__chunk_last - __old_chunk_size), - std::make_move_iterator(__chunk_last), - __to_first + (__chunk_first - __from_first), - __comp); - }); - }; - - if (__objects_are_in_buffer) - __merge_chunks(__values.get(), __first); - else - __merge_chunks(__first, __values.get()); - __objects_are_in_buffer = !__objects_are_in_buffer; - } while (__partitions.__chunk_count_ > 1); - - if (__objects_are_in_buffer) { - std::move(__values.get(), __values.get() + __size, __first); - } - - return __empty{}; -} - -_LIBCPP_HIDE_FROM_ABI inline void __cancel_execution() {} - -} // namespace __libdispatch -} // namespace __par_backend - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_LIBDISPATCH_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h deleted file mode 100644 index b0db70f58b..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h +++ /dev/null @@ -1,85 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
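The libdispatch `__parallel_merge` above turns one big merge into independent per-chunk merges: it steps a fixed chunk size through the larger input and uses `std::lower_bound` to find the matching split point in the smaller one, so each output chunk can be produced by a separate dispatch worker. A minimal standalone sketch of that splitting step, under the assumption that the first range is the larger one; `split_for_merge` and the two-chunk driver are illustrative names, not libc++ internals.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

template <class It>
std::pair<It, It> split_for_merge(It first1, It last1, It first2, It last2, std::ptrdiff_t chunk) {
  (void)last1;
  // Advance a fixed number of elements in the (assumed larger) first range...
  It mid1 = first1 + chunk;
  // ...then split the second range at the first element not less than mid1[-1],
  // so everything written by the left sub-merge precedes the right sub-merge.
  It mid2 = std::lower_bound(first2, last2, mid1[-1]);
  return {mid1, mid2};
}

int main() {
  std::vector<int> a{1, 3, 5, 7, 9, 11}, b{2, 4, 6, 8};
  auto [m1, m2] = split_for_merge(a.begin(), a.end(), b.begin(), b.end(), 3);
  // Left chunk merges [a.begin(), m1) with [b.begin(), m2); the right chunk
  // merges the rest. The two merges touch disjoint output regions.
  std::vector<int> out(a.size() + b.size());
  auto out_mid = std::merge(a.begin(), m1, b.begin(), m2, out.begin());
  std::merge(m1, a.end(), m2, b.end(), out_mid);
  assert(std::is_sorted(out.begin(), out.end()));
}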
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_MERGE_H
-#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_MERGE_H
-
-#include <__algorithm/merge.h>
-#include <__algorithm/pstl_backends/cpu_backends/backend.h>
-#include <__config>
-#include <__iterator/concepts.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#  pragma GCC system_header
-#endif
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_PUSH_MACROS
-#  include <__undef_macros>
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _ExecutionPolicy,
-          class _ForwardIterator1,
-          class _ForwardIterator2,
-          class _ForwardOutIterator,
-          class _Comp>
-_LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __pstl_merge(
-    __cpu_backend_tag,
-    _ForwardIterator1 __first1,
-    _ForwardIterator1 __last1,
-    _ForwardIterator2 __first2,
-    _ForwardIterator2 __last2,
-    _ForwardOutIterator __result,
-    _Comp __comp) {
-  if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> &&
-                __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value &&
-                __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value &&
-                __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) {
-    auto __res = __par_backend::__parallel_merge(
-        __first1,
-        __last1,
-        __first2,
-        __last2,
-        __result,
-        __comp,
-        [](_ForwardIterator1 __g_first1,
-           _ForwardIterator1 __g_last1,
-           _ForwardIterator2 __g_first2,
-           _ForwardIterator2 __g_last2,
-           _ForwardOutIterator __g_result,
-           _Comp __g_comp) {
-          [[maybe_unused]] auto __g_res = std::__pstl_merge<__remove_parallel_policy_t<_ExecutionPolicy>>(
-              __cpu_backend_tag{},
-              std::move(__g_first1),
-              std::move(__g_last1),
-              std::move(__g_first2),
-              std::move(__g_last2),
-              std::move(__g_result),
-              std::move(__g_comp));
-          _LIBCPP_ASSERT_INTERNAL(__g_res, "unseq/seq should never try to allocate!");
-        });
-    if (!__res)
-      return nullopt;
-    return __result + (__last1 - __first1) + (__last2 - __first2);
-  } else {
-    return std::merge(__first1, __last1, __first2, __last2, __result, __comp);
-  }
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-_LIBCPP_POP_MACROS
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_MERGE_H
diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h
deleted file mode 100644
index afcc7ffb26..0000000000
--- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// -*- C++ -*-
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_SERIAL_H
-#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_SERIAL_H
-
-#include <__config>
-#include <__utility/empty.h>
-#include <__utility/move.h>
-#include <cstddef>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-#  pragma GCC system_header
-#endif
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_PUSH_MACROS
-#  include <__undef_macros>
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-namespace __par_backend {
-inline namespace __serial_cpu_backend {
-
-template <class _RandomAccessIterator, class _Fp>
-_LIBCPP_HIDE_FROM_ABI optional<__empty>
-__parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Fp __f) {
-  __f(__first, __last);
-  return __empty{};
-}
-
-template <class _Index, class _UnaryOp, class _Tp, class _BinaryOp, class _Reduce>
-_LIBCPP_HIDE_FROM_ABI optional<_Tp>
-__parallel_transform_reduce(_Index __first, _Index __last, _UnaryOp, _Tp __init, _BinaryOp, _Reduce __reduce) {
-  return __reduce(std::move(__first), std::move(__last), std::move(__init));
-}
-
-template <class _RandomAccessIterator, class _Compare, class _LeafSort>
-_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_stable_sort(
-    _RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp, _LeafSort __leaf_sort) {
-  __leaf_sort(__first, __last, __comp);
-  return __empty{};
-}
-
-_LIBCPP_HIDE_FROM_ABI inline void __cancel_execution() {}
-
-template <class _RandomAccessIterator1,
-          class _RandomAccessIterator2,
-          class _RandomAccessIterator3,
-          class _Compare,
-          class _LeafMerge>
-_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_merge(
-    _RandomAccessIterator1 __first1,
-    _RandomAccessIterator1 __last1,
-    _RandomAccessIterator2 __first2,
-    _RandomAccessIterator2 __last2,
-    _RandomAccessIterator3 __outit,
-    _Compare __comp,
-    _LeafMerge __leaf_merge) {
-  __leaf_merge(__first1, __last1, __first2, __last2, __outit, __comp);
-  return __empty{};
-}
-
-// TODO: Complete this list
-
-} // namespace __serial_cpu_backend
-} // namespace __par_backend
-
-_LIBCPP_END_NAMESPACE_STD
-
-_LIBCPP_POP_MACROS
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_SERIAL_H
diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h
deleted file mode 100644
index 34c423586c..0000000000
--- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
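Throughout these backends, `optional` is the error channel: `nullopt` propagates an allocation failure out of a parallel backend, while the serial backend above can never fail and always returns an engaged `optional<__empty>`. A rough sketch of that convention with hypothetical names (`parallel_for_serial`, `for_each_frontend` are illustrative, not the actual libc++ entry points):

#include <new>
#include <optional>
#include <vector>

struct empty {}; // stand-in for libc++'s internal __empty tag type

// Serial backend shape: run the leaf callable over the whole range at once.
template <class It, class F>
std::optional<empty> parallel_for_serial(It first, It last, F f) {
  f(first, last); // one leaf invocation, no partitioning
  return empty{}; // always succeeds; a parallel backend may return nullopt
}

// Frontend shape: translate backend failure into the std::bad_alloc that the
// user-facing parallel algorithms are specified to throw.
template <class It, class F>
void for_each_frontend(It first, It last, F f) {
  auto res = parallel_for_serial(first, last, [&](It b, It e) {
    for (; b != e; ++b)
      f(*b);
  });
  if (!res)
    throw std::bad_alloc();
}

int main() {
  std::vector<int> v{1, 2, 3};
  for_each_frontend(v.begin(), v.end(), [](int& x) { x *= 2; });
}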
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_STABLE_SORT_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_STABLE_SORT_H - -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__algorithm/stable_sort.h> -#include <__config> -#include <__type_traits/is_execution_policy.h> -#include <__utility/empty.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__pstl_stable_sort(__cpu_backend_tag, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy>) { - return __par_backend::__parallel_stable_sort( - __first, __last, __comp, [](_RandomAccessIterator __g_first, _RandomAccessIterator __g_last, _Comp __g_comp) { - std::stable_sort(__g_first, __g_last, __g_comp); - }); - } else { - std::stable_sort(__first, __last, __comp); - return __empty{}; - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_STABLE_SORT_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h deleted file mode 100644 index eb11a961b7..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h +++ /dev/null @@ -1,84 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_THREAD_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_THREAD_H - -#include <__assert> -#include <__config> -#include <__utility/empty.h> -#include <__utility/move.h> -#include -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -// This backend implementation is for testing purposes only and not meant for production use. This will be replaced -// by a proper implementation once the PSTL implementation is somewhat stable. 
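The `__pstl_stable_sort` dispatch above hands `std::stable_sort` to the backend as a leaf callable and lets the backend own the partitioning: sort chunks independently, then merge. A toy two-way version of that shape, assuming a single `std::thread` stands in for a real worker pool (`parallel_stable_sort` is an illustrative name, not the libc++ function):

#include <algorithm>
#include <functional>
#include <thread>
#include <vector>

// Sort each half with the supplied leaf sort, then stably combine the halves.
template <class It, class Comp, class LeafSort>
void parallel_stable_sort(It first, It last, Comp comp, LeafSort leaf_sort) {
  auto n = last - first;
  if (n < 2)
    return;
  It mid = first + n / 2;
  std::thread t([&] { leaf_sort(first, mid, comp); }); // left half in parallel
  leaf_sort(mid, last, comp);                          // right half inline
  t.join();
  std::inplace_merge(first, mid, last, comp);          // stable combine
}

int main() {
  std::vector<int> v{5, 1, 4, 1, 5, 9, 2, 6};
  parallel_stable_sort(v.begin(), v.end(), std::less<>{},
                       [](auto f, auto l, auto c) { std::stable_sort(f, l, c); });
}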
- -_LIBCPP_BEGIN_NAMESPACE_STD - -namespace __par_backend { -inline namespace __thread_cpu_backend { - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Fp __f) { - __f(__first, __last); - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> -__parallel_transform_reduce(_Index __first, _Index __last, _UnaryOp, _Tp __init, _BinaryOp, _Reduce __reduce) { - return __reduce(std::move(__first), std::move(__last), std::move(__init)); -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_stable_sort( - _RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp, _LeafSort __leaf_sort) { - __leaf_sort(__first, __last, __comp); - return __empty{}; -} - -_LIBCPP_HIDE_FROM_ABI inline void __cancel_execution() {} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __outit, - _Compare __comp, - _LeafMerge __leaf_merge) { - __leaf_merge(__first1, __last1, __first2, __last2, __outit, __comp); - return __empty{}; -} - -} // namespace __thread_cpu_backend -} // namespace __par_backend - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_THREAD_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h deleted file mode 100644 index fdf1a2e78d..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h +++ /dev/null @@ -1,138 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_H - -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__algorithm/transform.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_PUSH_MACROS -# include <__undef_macros> - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI _Iterator2 -__simd_walk(_Iterator1 __first1, _DifferenceType __n, _Iterator2 __first2, _Function __f) noexcept { - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __f(__first1[__i], __first2[__i]); - return __first2 + __n; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __pstl_transform( - __cpu_backend_tag, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - _UnaryOperation __op) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - std::__par_backend::__parallel_for( - __first, __last, [__op, __first, __result](_ForwardIterator __brick_first, _ForwardIterator __brick_last) { - auto __res = std::__pstl_transform<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, __brick_first, __brick_last, __result + (__brick_first - __first), __op); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - return *std::move(__res); - }); - return __result + (__last - __first); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - return std::__simd_walk( - __first, - __last - __first, - __result, - [&](__iter_reference<_ForwardIterator> __in_value, __iter_reference<_ForwardOutIterator> __out_value) { - __out_value = __op(__in_value); - }); - } else { - return std::transform(__first, __last, __result, __op); - } -} - -template -_LIBCPP_HIDE_FROM_ABI _Iterator3 __simd_walk( - _Iterator1 __first1, _DifferenceType __n, _Iterator2 __first2, _Iterator3 __first3, _Function __f) noexcept { - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __f(__first1[__i], __first2[__i], __first3[__i]); - return __first3 + __n; -} -template >, int> = 0> -_LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __pstl_transform( - __cpu_backend_tag, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardOutIterator __result, - _BinaryOperation __op) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - auto __res = std::__par_backend::__parallel_for( - 
__first1, - __last1, - [__op, __first1, __first2, __result](_ForwardIterator1 __brick_first, _ForwardIterator1 __brick_last) { - return std::__pstl_transform<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - __brick_first, - __brick_last, - __first2 + (__brick_first - __first1), - __result + (__brick_first - __first1), - __op); - }); - if (!__res) - return nullopt; - return __result + (__last1 - __first1); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - return std::__simd_walk( - __first1, - __last1 - __first1, - __first2, - __result, - [&](__iter_reference<_ForwardIterator1> __in1, - __iter_reference<_ForwardIterator2> __in2, - __iter_reference<_ForwardOutIterator> __out_value) { __out_value = __op(__in1, __in2); }); - } else { - return std::transform(__first1, __last1, __first2, __result, __op); - } -} - -_LIBCPP_END_NAMESPACE_STD - -_LIBCPP_POP_MACROS - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h deleted file mode 100644 index 14a0d76741..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h +++ /dev/null @@ -1,202 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
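The unsequenced path of `__pstl_transform` above goes through `__simd_walk`, an index-based loop annotated with `_PSTL_PRAGMA_SIMD` so the compiler is free to vectorize it. A standalone approximation; the `#pragma omp simd` spelling (active under `-fopenmp-simd`, harmlessly ignored otherwise) is an assumed expansion of that macro, not necessarily what libc++ emits on every toolchain:

#include <cstddef>
#include <vector>

template <class It1, class It2, class F>
It2 simd_walk(It1 first1, std::ptrdiff_t n, It2 first2, F f) {
#pragma omp simd
  for (std::ptrdiff_t i = 0; i < n; ++i)
    f(first1[i], first2[i]); // e.g. "out = op(in)" for the unary transform
  return first2 + n;
}

int main() {
  std::vector<int> in{1, 2, 3}, out(3);
  simd_walk(in.begin(), 3, out.begin(), [](int a, int& b) { b = a * a; });
}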
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_REDUCE_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_REDUCE_H - -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/iterator_traits.h> -#include <__numeric/transform_reduce.h> -#include <__type_traits/is_arithmetic.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/operation_traits.h> -#include <__utility/move.h> -#include -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - __enable_if_t<__desugars_to<__plus_tag, _BinaryOperation, _Tp, _UnaryResult>::value && is_arithmetic_v<_Tp> && - is_arithmetic_v<_UnaryResult>, - int> = 0> -_LIBCPP_HIDE_FROM_ABI _Tp -__simd_transform_reduce(_DifferenceType __n, _Tp __init, _BinaryOperation, _UnaryOperation __f) noexcept { - _PSTL_PRAGMA_SIMD_REDUCTION(+ : __init) - for (_DifferenceType __i = 0; __i < __n; ++__i) - __init += __f(__i); - return __init; -} - -template , - __enable_if_t::value && - is_arithmetic_v<_Tp> && is_arithmetic_v<_UnaryResult>), - int> = 0> -_LIBCPP_HIDE_FROM_ABI _Tp -__simd_transform_reduce(_Size __n, _Tp __init, _BinaryOperation __binary_op, _UnaryOperation __f) noexcept { - const _Size __block_size = __lane_size / sizeof(_Tp); - if (__n > 2 * __block_size && __block_size > 1) { - alignas(__lane_size) char __lane_buffer[__lane_size]; - _Tp* __lane = reinterpret_cast<_Tp*>(__lane_buffer); - - // initializer - _PSTL_PRAGMA_SIMD - for (_Size __i = 0; __i < __block_size; ++__i) { - ::new (__lane + __i) _Tp(__binary_op(__f(__i), __f(__block_size + __i))); - } - // main loop - _Size __i = 2 * __block_size; - const _Size __last_iteration = __block_size * (__n / __block_size); - for (; __i < __last_iteration; __i += __block_size) { - _PSTL_PRAGMA_SIMD - for (_Size __j = 0; __j < __block_size; ++__j) { - __lane[__j] = __binary_op(std::move(__lane[__j]), __f(__i + __j)); - } - } - // remainder - _PSTL_PRAGMA_SIMD - for (_Size __j = 0; __j < __n - __last_iteration; ++__j) { - __lane[__j] = __binary_op(std::move(__lane[__j]), __f(__last_iteration + __j)); - } - // combiner - for (_Size __j = 0; __j < __block_size; ++__j) { - __init = __binary_op(std::move(__init), std::move(__lane[__j])); - } - // destroyer - _PSTL_PRAGMA_SIMD - for (_Size __j = 0; __j < __block_size; ++__j) { - __lane[__j].~_Tp(); - } - } else { - for (_Size __i = 0; __i < __n; ++__i) { - __init = __binary_op(std::move(__init), __f(__i)); - } - } - return __init; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> __pstl_transform_reduce( - __cpu_backend_tag, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _Tp __init, - _BinaryOperation1 __reduce, - _BinaryOperation2 __transform) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value) { - return __par_backend::__parallel_transform_reduce( - __first1, - std::move(__last1), - [__first1, __first2, __transform](_ForwardIterator1 __iter) { - return 
__transform(*__iter, *(__first2 + (__iter - __first1))); - }, - std::move(__init), - std::move(__reduce), - [__first1, __first2, __reduce, __transform]( - _ForwardIterator1 __brick_first, _ForwardIterator1 __brick_last, _Tp __brick_init) { - return *std::__pstl_transform_reduce<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - __brick_first, - std::move(__brick_last), - __first2 + (__brick_first - __first1), - std::move(__brick_init), - std::move(__reduce), - std::move(__transform)); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value) { - return std::__simd_transform_reduce( - __last1 - __first1, std::move(__init), std::move(__reduce), [&](__iter_diff_t<_ForwardIterator1> __i) { - return __transform(__first1[__i], __first2[__i]); - }); - } else { - return std::transform_reduce( - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__init), - std::move(__reduce), - std::move(__transform)); - } -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> __pstl_transform_reduce( - __cpu_backend_tag, - _ForwardIterator __first, - _ForwardIterator __last, - _Tp __init, - _BinaryOperation __reduce, - _UnaryOperation __transform) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return __par_backend::__parallel_transform_reduce( - std::move(__first), - std::move(__last), - [__transform](_ForwardIterator __iter) { return __transform(*__iter); }, - std::move(__init), - __reduce, - [__transform, __reduce](auto __brick_first, auto __brick_last, _Tp __brick_init) { - auto __res = std::__pstl_transform_reduce<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - std::move(__brick_first), - std::move(__brick_last), - std::move(__brick_init), - std::move(__reduce), - std::move(__transform)); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - return *std::move(__res); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return std::__simd_transform_reduce( - __last - __first, - std::move(__init), - std::move(__reduce), - [=, &__transform](__iter_diff_t<_ForwardIterator> __i) { return __transform(__first[__i]); }); - } else { - return std::transform_reduce( - std::move(__first), std::move(__last), std::move(__init), std::move(__reduce), std::move(__transform)); - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_REDUCE_H diff --git a/lib/libcxx/include/__algorithm/pstl_copy.h b/lib/libcxx/include/__algorithm/pstl_copy.h deleted file mode 100644 index 1069dcec0e..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_copy.h +++ /dev/null @@ -1,121 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
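The `__simd_transform_reduce` fast path above keeps `__lane_size / sizeof(_Tp)` independent partial accumulators so the inner loop vectorizes, then folds the lanes at the end. That reassociates the reduction, which is why the fast path is constrained to arithmetic types with a plus-like operation (the `__desugars_to<__plus_tag>` check). A sketch of the same blocking scheme; the name `lane_blocked_reduce` and the lane count 8 are illustrative assumptions, and plain assignment stands in for the placement-new the real code uses for non-trivial types:

#include <cassert>
#include <cstddef>

template <class T, class F>
T lane_blocked_reduce(std::ptrdiff_t n, T init, F f) {
  constexpr std::ptrdiff_t lanes = 8; // assumed lane count for illustration
  if (n < 2 * lanes) {                // small input: plain serial fold
    for (std::ptrdiff_t i = 0; i < n; ++i)
      init = init + f(i);
    return init;
  }
  T lane[lanes];
  for (std::ptrdiff_t j = 0; j < lanes; ++j) // seed each lane with two terms
    lane[j] = f(j) + f(lanes + j);
  std::ptrdiff_t tail = lanes * (n / lanes);
  for (std::ptrdiff_t i = 2 * lanes; i < tail; i += lanes) // vectorizable body
    for (std::ptrdiff_t j = 0; j < lanes; ++j)
      lane[j] = lane[j] + f(i + j);
  for (std::ptrdiff_t j = 0; j < n - tail; ++j) // remainder elements
    lane[j] = lane[j] + f(tail + j);
  for (std::ptrdiff_t j = 0; j < lanes; ++j) // fold the lanes serially
    init = init + lane[j];
  return init;
}

int main() {
  auto f = [](std::ptrdiff_t i) { return int(i); };
  assert(lane_blocked_reduce(20, 0, f) == 190); // sum of 0..19
}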
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_COPY_H -#define _LIBCPP___ALGORITHM_PSTL_COPY_H - -#include <__algorithm/copy_n.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_transform.h> -#include <__config> -#include <__functional/identity.h> -#include <__iterator/concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_constant_evaluated.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/is_trivially_copyable.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -// TODO: Use the std::copy/move shenanigans to forward to std::memmove - -template -void __pstl_copy(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__copy(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) { - return std::__transform(__policy, __g_first, __g_last, __g_result, __identity()); - }, - std::move(__first), - std::move(__last), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -copy(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { - auto __res = std::__copy(__policy, std::move(__first), std::move(__last), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_copy_n(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __copy_n( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy_n, _RawPolicy), - [&__policy]( - _ForwardIterator __g_first, _Size __g_n, _ForwardOutIterator __g_result) -> optional<_ForwardIterator> { - if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) - return std::__copy(__policy, std::move(__g_first), std::move(__g_first + __g_n), std::move(__g_result)); - else - return std::copy_n(__g_first, __g_n, __g_result); - }, - std::move(__first), - std::move(__n), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -copy_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _ForwardOutIterator __result) { - auto __res = std::__copy_n(__policy, std::move(__first), std::move(__n), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_COPY_H diff --git a/lib/libcxx/include/__algorithm/pstl_count.h b/lib/libcxx/include/__algorithm/pstl_count.h deleted file 
mode 100644 index 2781f6bfd3..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_count.h +++ /dev/null @@ -1,121 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_COUNT_H -#define _LIBCPP___ALGORITHM_PSTL_COUNT_H - -#include <__algorithm/count.h> -#include <__algorithm/for_each.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__atomic/atomic.h> -#include <__config> -#include <__functional/operations.h> -#include <__iterator/iterator_traits.h> -#include <__numeric/pstl_transform_reduce.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_count_if(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> __count_if( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept { - using __diff_t = __iter_diff_t<_ForwardIterator>; - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count_if, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional<__diff_t> { - return std::__transform_reduce( - __policy, - std::move(__g_first), - std::move(__g_last), - __diff_t(), - std::plus{}, - [&](__iter_reference<_ForwardIterator> __element) -> bool { return __g_pred(__element); }); - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> -count_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - auto __res = std::__count_if(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_count(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> -__count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) - -> optional<__iter_diff_t<_ForwardIterator>> { - return std::count_if(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __v) { - return __v == __g_value; - }); - }, - std::move(__first), - std::move(__last), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> -count(_ExecutionPolicy&& __policy, _ForwardIterator __first, 
_ForwardIterator __last, const _Tp& __value) { - auto __res = std::__count(__policy, std::move(__first), std::move(__last), __value); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_COUNT_H diff --git a/lib/libcxx/include/__algorithm/pstl_equal.h b/lib/libcxx/include/__algorithm/pstl_equal.h deleted file mode 100644 index d235c0f4f4..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_equal.h +++ /dev/null @@ -1,175 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_EQUAL_H -#define _LIBCPP___ALGORITHM_PSTL_EQUAL_H - -#include <__algorithm/equal.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__functional/operations.h> -#include <__iterator/iterator_traits.h> -#include <__numeric/pstl_transform_reduce.h> -#include <__utility/move.h> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_equal(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__equal(_ExecutionPolicy&& __policy, - _ForwardIterator1&& __first1, - _ForwardIterator1&& __last1, - _ForwardIterator2&& __first2, - _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy), - [&__policy]( - _ForwardIterator1 __g_first1, _ForwardIterator1 __g_last1, _ForwardIterator2 __g_first2, _Pred __g_pred) { - return std::__transform_reduce( - __policy, - std::move(__g_first1), - std::move(__g_last1), - std::move(__g_first2), - true, - std::logical_and{}, - std::move(__g_pred)); - }, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _Pred __pred) { - auto __res = std::__equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -template >, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { - return std::equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::equal_to{}); -} - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__equal(_ExecutionPolicy&& __policy, - _ForwardIterator1&& __first1, - _ForwardIterator1&& __last1, - _ForwardIterator2&& __first2, - _ForwardIterator2&& __last2, - _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy), - [&__policy](_ForwardIterator1 __g_first1, - _ForwardIterator1 __g_last1, - _ForwardIterator2 __g_first2, - _ForwardIterator2 __g_last2, - 
_Pred __g_pred) -> optional { - if constexpr (__has_random_access_iterator_category<_ForwardIterator1>::value && - __has_random_access_iterator_category<_ForwardIterator2>::value) { - if (__g_last1 - __g_first1 != __g_last2 - __g_first2) - return false; - return std::__equal( - __policy, std::move(__g_first1), std::move(__g_last1), std::move(__g_first2), std::move(__g_pred)); - } else { - (void)__policy; // Avoid unused lambda capture warning - return std::equal( - std::move(__g_first1), - std::move(__g_last1), - std::move(__g_first2), - std::move(__g_last2), - std::move(__g_pred)); - } - }, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__last2), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _Pred __pred) { - auto __res = std::__equal( - __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -template >, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2) { - return std::equal( - __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::equal_to{}); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_EQUAL_H diff --git a/lib/libcxx/include/__algorithm/pstl_fill.h b/lib/libcxx/include/__algorithm/pstl_fill.h deleted file mode 100644 index 488b49a0fe..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_fill.h +++ /dev/null @@ -1,116 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
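Both pstl_count.h and pstl_equal.h above reduce to `transform_reduce`: `count_if` maps each element to 0/1 and sums with `std::plus`, and `equal` maps each pair through the predicate and folds with `std::logical_and`. A serial sketch of the `count_if` shape (the wrapper name is illustrative):

#include <cassert>
#include <functional>
#include <iterator>
#include <numeric>
#include <vector>

template <class It, class Pred>
typename std::iterator_traits<It>::difference_type
count_if_via_reduce(It first, It last, Pred pred) {
  using diff_t = typename std::iterator_traits<It>::difference_type;
  // Each element contributes 0 or 1; plus-reduction yields the count.
  return std::transform_reduce(first, last, diff_t(0), std::plus<>{},
                               [&](auto const& e) -> diff_t { return pred(e) ? 1 : 0; });
}

int main() {
  std::vector<int> v{1, 2, 3, 4, 5};
  assert(count_if_via_reduce(v.begin(), v.end(), [](int x) { return x % 2 == 0; }) == 2);
}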
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FILL_H -#define _LIBCPP___ALGORITHM_PSTL_FILL_H - -#include <__algorithm/fill_n.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_fill(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) noexcept { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) { - return std::__for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) { - __element = __g_value; - }); - }, - std::move(__first), - std::move(__last), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__fill(__policy, std::move(__first), std::move(__last), __value)) - std::__throw_bad_alloc(); -} - -template -void __pstl_fill_n(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__fill_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _SizeT&& __n, const _Tp& __value) noexcept { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill_n, _RawPolicy), - [&](_ForwardIterator __g_first, _SizeT __g_n, const _Tp& __g_value) { - if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) - std::fill(__policy, __g_first, __g_first + __g_n, __g_value); - else - std::fill_n(__g_first, __g_n, __g_value); - return optional<__empty>{__empty{}}; - }, - std::move(__first), - std::move(__n), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -fill_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _SizeT __n, const _Tp& __value) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__fill_n(__policy, std::move(__first), std::move(__n), __value)) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_FILL_H diff --git a/lib/libcxx/include/__algorithm/pstl_find.h b/lib/libcxx/include/__algorithm/pstl_find.h deleted file mode 100644 index 5b694db68a..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_find.h +++ /dev/null @@ 
-1,141 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FIND_H -#define _LIBCPP___ALGORITHM_PSTL_FIND_H - -#include <__algorithm/comp.h> -#include <__algorithm/find.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> -__find_if(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_find_if<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardIterator -find_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__find_if(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_find_if_not(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> -__find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find_if_not, _RawPolicy), - [&](_ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Predicate&& __g_pred) - -> optional<__remove_cvref_t<_ForwardIterator>> { - return std::__find_if( - __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __value) { - return !__g_pred(__value); - }); - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardIterator -find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__find_if_not(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_find(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> -__find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find, _RawPolicy), - [&](_ForwardIterator __g_first, 
_ForwardIterator __g_last, const _Tp& __g_value) -> optional<_ForwardIterator> { - return std::find_if( - __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __element) { - return __element == __g_value; - }); - }, - std::move(__first), - std::move(__last), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardIterator -find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__find(__policy, std::move(__first), std::move(__last), __value); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_FIND_H diff --git a/lib/libcxx/include/__algorithm/pstl_for_each.h b/lib/libcxx/include/__algorithm/pstl_for_each.h deleted file mode 100644 index bb7b5a61a6..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_for_each.h +++ /dev/null @@ -1,108 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H -#define _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H - -#include <__algorithm/for_each.h> -#include <__algorithm/for_each_n.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__type_traits/void_t.h> -#include <__utility/empty.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__for_each(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Function&& __func) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_for_each<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__func)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -for_each(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Function __func) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__for_each(__policy, std::move(__first), std::move(__last), std::move(__func))) - std::__throw_bad_alloc(); -} - -template -void __pstl_for_each_n(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __size, _Function&& __func) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_for_each_n, _RawPolicy), - [&](_ForwardIterator __g_first, _Size 
__g_size, _Function __g_func) -> optional<__empty> { - if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - std::for_each(__policy, std::move(__g_first), __g_first + __g_size, std::move(__g_func)); - return __empty{}; - } else { - std::for_each_n(std::move(__g_first), __g_size, std::move(__g_func)); - return __empty{}; - } - }, - std::move(__first), - std::move(__size), - std::move(__func)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __size, _Function __func) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__for_each_n(__policy, std::move(__first), std::move(__size), std::move(__func)); - if (!__res) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H diff --git a/lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h b/lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h deleted file mode 100644 index 6fa1107491..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h +++ /dev/null @@ -1,44 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FRONTEND_DISPATCH -#define _LIBCPP___ALGORITHM_PSTL_FRONTEND_DISPATCH - -#include <__config> -#include <__type_traits/is_callable.h> -#include <__utility/forward.h> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -# define _LIBCPP_PSTL_CUSTOMIZATION_POINT(name, policy) \ - [](auto&&... __args) -> decltype(std::name( \ - typename __select_backend::type{}, std::forward(__args)...)) { \ - return std::name(typename __select_backend::type{}, std::forward(__args)...); \ - } - -template -_LIBCPP_HIDE_FROM_ABI decltype(auto) -__pstl_frontend_dispatch(_SpecializedImpl __specialized_impl, _GenericImpl __generic_impl, _Args&&... __args) { - if constexpr (__is_callable<_SpecializedImpl, _Args...>::value) { - return __specialized_impl(std::forward<_Args>(__args)...); - } else { - return __generic_impl(std::forward<_Args>(__args)...); - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_FRONTEND_DISPATCH diff --git a/lib/libcxx/include/__algorithm/pstl_generate.h b/lib/libcxx/include/__algorithm/pstl_generate.h deleted file mode 100644 index 7133c6f4f4..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_generate.h +++ /dev/null @@ -1,114 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
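The deleted pstl_frontend_dispatch.h implements the pattern most of these headers lean on: probe at compile time whether the selected backend provides a specialized overload, and otherwise fall back to a generic implementation assembled from other parallel primitives. A compact sketch using the standard `std::is_invocable_v` in place of libc++'s internal `__is_callable` (all names illustrative):

#include <cstdio>
#include <type_traits>
#include <utility>

template <class Specialized, class Generic, class... Args>
decltype(auto) frontend_dispatch(Specialized spec, Generic gen, Args&&... args) {
  if constexpr (std::is_invocable_v<Specialized&, Args...>)
    return spec(std::forward<Args>(args)...); // backend provided an overload
  else
    return gen(std::forward<Args>(args)...); // generic fallback
}

int main() {
  auto specialized = [](int x) { return x + 1; };  // only invocable with int
  auto generic = [](const char* s) { return s[0]; };
  std::printf("%d\n", frontend_dispatch(specialized, generic, 41)); // 42
  std::printf("%c\n", frontend_dispatch(specialized, generic, "hi")); // 'h'
}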
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_GENERATE_H -#define _LIBCPP___ALGORITHM_PSTL_GENERATE_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_generate(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__generate(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Generator&& __gen) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Generator __g_gen) { - return std::__for_each( - __policy, std::move(__g_first), std::move(__g_last), [&](__iter_reference<_ForwardIterator> __element) { - __element = __g_gen(); - }); - }, - std::move(__first), - std::move(__last), - std::move(__gen)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -generate(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Generator __gen) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__generate(__policy, std::move(__first), std::move(__last), std::move(__gen))) - std::__throw_bad_alloc(); -} - -template -void __pstl_generate_n(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__generate_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _Generator&& __gen) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate_n, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _Size __g_n, _Generator __g_gen) { - return std::__for_each_n( - __policy, std::move(__g_first), std::move(__g_n), [&](__iter_reference<_ForwardIterator> __element) { - __element = __g_gen(); - }); - }, - std::move(__first), - __n, - std::move(__gen)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -generate_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _Generator __gen) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__generate_n(__policy, std::move(__first), std::move(__n), std::move(__gen))) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_GENERATE_H diff --git a/lib/libcxx/include/__algorithm/pstl_is_partitioned.h b/lib/libcxx/include/__algorithm/pstl_is_partitioned.h deleted file mode 100644 index b654302122..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_is_partitioned.h +++ /dev/null @@ -1,77 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache 
License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_IS_PARITTIONED -#define _LIBCPP___ALGORITHM_PSTL_IS_PARITTIONED - -#include <__algorithm/pstl_any_all_none_of.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_find.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_is_partitioned(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional __is_partitioned( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_is_partitioned, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) { - __g_first = std::find_if_not(__policy, __g_first, __g_last, __g_pred); - if (__g_first == __g_last) - return true; - ++__g_first; - return std::none_of(__policy, __g_first, __g_last, __g_pred); - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -is_partitioned(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - auto __res = std::__is_partitioned(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_IS_PARITTIONED diff --git a/lib/libcxx/include/__algorithm/pstl_merge.h b/lib/libcxx/include/__algorithm/pstl_merge.h deleted file mode 100644 index 3d262db6bc..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_merge.h +++ /dev/null @@ -1,92 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
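The `__is_partitioned` dispatch above decomposes the question into two already-parallel primitives: find the first element that fails the predicate, then verify that no later element satisfies it. A serial restatement of the same decomposition (wrapper name illustrative):

#include <algorithm>
#include <cassert>
#include <vector>

template <class It, class Pred>
bool is_partitioned_via_find(It first, It last, Pred pred) {
  first = std::find_if_not(first, last, pred); // end of the "true" prefix
  if (first == last)
    return true; // every element satisfied the predicate
  ++first;
  return std::none_of(first, last, pred); // no stray "true" afterwards
}

int main() {
  std::vector<int> v{2, 4, 6, 1, 3};
  assert(is_partitioned_via_find(v.begin(), v.end(), [](int x) { return x % 2 == 0; }));
}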
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_MERGE_H -#define _LIBCPP___ALGORITHM_PSTL_MERGE_H - -#include <__algorithm/pstl_backend.h> -#include <__config> -#include <__functional/operations.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__merge(_ExecutionPolicy&&, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp = {}) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_merge<_RawPolicy>( - _Backend{}, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__last2), - std::move(__result), - std::move(__comp)); -} - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -merge(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp = {}) { - auto __res = std::__merge( - __policy, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__last2), - std::move(__result), - std::move(__comp)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_MERGE_H diff --git a/lib/libcxx/include/__algorithm/pstl_move.h b/lib/libcxx/include/__algorithm/pstl_move.h deleted file mode 100644 index d8441f1a6c..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_move.h +++ /dev/null @@ -1,84 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
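[Editor's note] The deleted pstl_merge.h shows the two-layer convention shared by all of these frontends: an internal __merge returns an optional that is disengaged when the parallel backend fails (typically an allocation failure), and the public merge converts that into std::bad_alloc. A hedged sketch of the convention with a caller-supplied stand-in backend; every name below is illustrative:

```cpp
#include <new>
#include <optional>
#include <utility>

// Public layer: converts a disengaged optional from the backend into
// std::bad_alloc, as the deleted header does via std::__throw_bad_alloc().
template <class It, class Backend>
It merge_frontend(It first, It last, Backend backend) {
  std::optional<It> res = backend(first, last); // internal __merge-style call
  if (!res)
    throw std::bad_alloc();
  return *std::move(res);
}
```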
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_MOVE_H -#define _LIBCPP___ALGORITHM_PSTL_MOVE_H - -#include <__algorithm/copy_n.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_transform.h> -#include <__config> -#include <__functional/identity.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_constant_evaluated.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/is_trivially_copyable.h> -#include <__type_traits/remove_cvref.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -// TODO: Use the std::copy/move shenanigans to forward to std::memmove -// Investigate whether we want to still forward to std::transform(policy) -// in that case for the execution::par part, or whether we actually want -// to run everything serially in that case. - -template -void __pstl_move(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__move(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_move, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) { - return std::__transform(__policy, __g_first, __g_last, __g_result, [](auto&& __v) { return std::move(__v); }); - }, - std::move(__first), - std::move(__last), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -move(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { - auto __res = std::__move(__policy, std::move(__first), std::move(__last), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_MOVE_H diff --git a/lib/libcxx/include/__algorithm/pstl_replace.h b/lib/libcxx/include/__algorithm/pstl_replace.h deleted file mode 100644 index b1caf3fd4a..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_replace.h +++ /dev/null @@ -1,247 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
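[Editor's note] The deleted pstl_move.h frontend reduces move to transform with a moving lambda (the TODO it carries considers a memmove fast path instead). A serial illustration of the same reduction:

```cpp
#include <algorithm>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::string> src{"alpha", "beta"};
  std::vector<std::string> dst(src.size());
  // move-as-transform: each element passes through std::move, as in the
  // deleted fallback's lambda that returns std::move(v).
  std::transform(src.begin(), src.end(), dst.begin(),
                 [](std::string& v) { return std::move(v); });
}
```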
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_REPLACE_H -#define _LIBCPP___ALGORITHM_PSTL_REPLACE_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_transform.h> -#include <__config> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_replace_if(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__replace_if(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _Pred&& __pred, - const _Tp& __new_value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_if, _RawPolicy), - [&__policy]( - _ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Pred&& __g_pred, const _Tp& __g_new_value) { - std::for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) { - if (__g_pred(__element)) - __element = __g_new_value; - }); - return optional<__empty>{__empty{}}; - }, - std::move(__first), - std::move(__last), - std::move(__pred), - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -replace_if(_ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _Pred __pred, - const _Tp& __new_value) { - auto __res = std::__replace_if(__policy, std::move(__first), std::move(__last), std::move(__pred), __new_value); - if (!__res) - std::__throw_bad_alloc(); -} - -template -void __pstl_replace(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__replace(_ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - const _Tp& __old_value, - const _Tp& __new_value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace, _RawPolicy), - [&__policy]( - _ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_old_value, const _Tp& __g_new_value) { - return std::__replace_if( - __policy, - std::move(__g_first), - std::move(__g_last), - [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; }, - __g_new_value); - }, - std::move(__first), - std::move(__last), - __old_value, - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -replace(_ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - const _Tp& __old_value, - const _Tp& __new_value) { - if (!std::__replace(__policy, std::move(__first), std::move(__last), __old_value, __new_value)) - std::__throw_bad_alloc(); -} - -template -void __pstl_replace_copy_if(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy_if( - _ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result, - _Pred&& __pred, - const _Tp& __new_value) { - return std::__pstl_frontend_dispatch( - 
_LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy_if, _RawPolicy), - [&__policy](_ForwardIterator __g_first, - _ForwardIterator __g_last, - _ForwardOutIterator __g_result, - _Pred __g_pred, - const _Tp& __g_new_value) -> optional<__empty> { - if (!std::__transform( - __policy, __g_first, __g_last, __g_result, [&](__iter_reference<_ForwardIterator> __element) { - return __g_pred(__element) ? __g_new_value : __element; - })) - return nullopt; - return __empty{}; - }, - std::move(__first), - std::move(__last), - std::move(__result), - std::move(__pred), - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void replace_copy_if( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - _Pred __pred, - const _Tp& __new_value) { - if (!std::__replace_copy_if( - __policy, std::move(__first), std::move(__last), std::move(__result), std::move(__pred), __new_value)) - std::__throw_bad_alloc(); -} - -template -void __pstl_replace_copy(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy( - _ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result, - const _Tp& __old_value, - const _Tp& __new_value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy, _RawPolicy), - [&__policy](_ForwardIterator __g_first, - _ForwardIterator __g_last, - _ForwardOutIterator __g_result, - const _Tp& __g_old_value, - const _Tp& __g_new_value) { - return std::__replace_copy_if( - __policy, - std::move(__g_first), - std::move(__g_last), - std::move(__g_result), - [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; }, - __g_new_value); - }, - std::move(__first), - std::move(__last), - std::move(__result), - __old_value, - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void replace_copy( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - const _Tp& __old_value, - const _Tp& __new_value) { - if (!std::__replace_copy( - __policy, std::move(__first), std::move(__last), std::move(__result), __old_value, __new_value)) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_REPLACE_H diff --git a/lib/libcxx/include/__algorithm/pstl_rotate_copy.h b/lib/libcxx/include/__algorithm/pstl_rotate_copy.h deleted file mode 100644 index 346aab1d4a..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_rotate_copy.h +++ /dev/null @@ -1,85 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
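[Editor's note] __replace_copy_if above is a single transform whose callable selects either the replacement value or the original element. The same mapping, serially (names ours):

```cpp
#include <algorithm>

// Writes new_value where pred holds, the element itself otherwise,
// matching the lambda the deleted frontend passes to __transform.
template <class It, class Out, class Pred, class T>
Out replace_copy_if_fallback(It first, It last, Out out,
                             Pred pred, const T& new_value) {
  return std::transform(first, last, out, [&](const T& e) {
    return pred(e) ? new_value : e;
  });
}
```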
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H -#define _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_copy.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__type_traits/is_execution_policy.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_rotate_copy(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__rotate_copy(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __middle, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_rotate_copy, _RawPolicy), - [&__policy](_ForwardIterator __g_first, - _ForwardIterator __g_middle, - _ForwardIterator __g_last, - _ForwardOutIterator __g_result) -> optional<_ForwardOutIterator> { - auto __result_mid = - std::__copy(__policy, _ForwardIterator(__g_middle), std::move(__g_last), std::move(__g_result)); - if (!__result_mid) - return nullopt; - return std::__copy(__policy, std::move(__g_first), std::move(__g_middle), *std::move(__result_mid)); - }, - std::move(__first), - std::move(__middle), - std::move(__last), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator rotate_copy( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __middle, - _ForwardIterator __last, - _ForwardOutIterator __result) { - auto __res = - std::__rotate_copy(__policy, std::move(__first), std::move(__middle), std::move(__last), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H diff --git a/lib/libcxx/include/__algorithm/pstl_sort.h b/lib/libcxx/include/__algorithm/pstl_sort.h deleted file mode 100644 index a931f76811..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_sort.h +++ /dev/null @@ -1,82 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
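[Editor's note] The deleted __rotate_copy fallback is just two copies: the tail [middle, last) first, then the head [first, middle), bailing out if the first copy reports failure. Stripped of the optional plumbing:

```cpp
#include <algorithm>

// rotate_copy as two copies, as in the deleted fallback.
template <class It, class Out>
Out rotate_copy_fallback(It first, It middle, It last, Out out) {
  out = std::copy(middle, last, out);   // rotated-to-front tail
  return std::copy(first, middle, out); // then the original head
}
```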
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_SORT_H -#define _LIBCPP___ALGORITHM_PSTL_SORT_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_stable_sort.h> -#include <__config> -#include <__functional/operations.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/empty.h> -#include <__utility/forward.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_sort(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __sort( - _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_sort, _RawPolicy), - [&__policy](_RandomAccessIterator __g_first, _RandomAccessIterator __g_last, _Comp __g_comp) { - std::stable_sort(__policy, std::move(__g_first), std::move(__g_last), std::move(__g_comp)); - return optional<__empty>{__empty{}}; - }, - std::move(__first), - std::move(__last), - std::move(__comp)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { - if (!std::__sort(__policy, std::move(__first), std::move(__last), std::move(__comp))) - std::__throw_bad_alloc(); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) { - std::sort(std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{}); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_SORT_H diff --git a/lib/libcxx/include/__algorithm/pstl_stable_sort.h b/lib/libcxx/include/__algorithm/pstl_stable_sort.h deleted file mode 100644 index 8ea0bb3f9a..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_stable_sort.h +++ /dev/null @@ -1,61 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
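[Editor's note] pstl_sort.h, deleted above, likewise has no backend of its own: the sort fallback calls stable_sort with the same policy, and the comparator-less overload forwards with less{}. The same shape without policies:

```cpp
#include <algorithm>
#include <functional>

template <class It, class Comp>
void sort_fallback(It first, It last, Comp comp) {
  std::stable_sort(first, last, comp); // deleted frontend delegates here
}

template <class It>
void sort_fallback(It first, It last) {
  sort_fallback(first, last, std::less<>{}); // mirrors the less{} forwarding
}
```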
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H -#define _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H - -#include <__algorithm/pstl_backend.h> -#include <__config> -#include <__functional/operations.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/empty.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __stable_sort( - _ExecutionPolicy&&, _RandomAccessIterator&& __first, _RandomAccessIterator&& __last, _Comp&& __comp = {}) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_stable_sort<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__comp)); -} - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void stable_sort( - _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp = {}) { - if (!std::__stable_sort(__policy, std::move(__first), std::move(__last), std::move(__comp))) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H diff --git a/lib/libcxx/include/__algorithm/pstl_transform.h b/lib/libcxx/include/__algorithm/pstl_transform.h deleted file mode 100644 index f95938782f..0000000000 --- a/lib/libcxx/include/__algorithm/pstl_transform.h +++ /dev/null @@ -1,120 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
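[Editor's note] pstl_stable_sort.h also illustrates the second dispatch mechanism these headers use: __select_backend maps the raw execution-policy type to a backend tag at compile time, and the tagged overload set does the rest. A minimal tag-dispatch sketch of that idea; all type names below are invented for illustration:

```cpp
#include <algorithm>

struct serial_backend {};
struct thread_backend {};
struct par_policy {};

// Policy -> backend mapping in the spirit of __select_backend.
template <class Policy> struct select_backend { using type = serial_backend; };
template <> struct select_backend<par_policy> { using type = thread_backend; };

template <class It> void stable_sort_impl(serial_backend, It f, It l) {
  std::stable_sort(f, l);
}
template <class It> void stable_sort_impl(thread_backend, It f, It l) {
  std::stable_sort(f, l); // a real backend would fan work out to threads
}

template <class Policy, class It>
void stable_sort_dispatch(It first, It last) {
  using backend = typename select_backend<Policy>::type;
  stable_sort_impl(backend{}, first, last); // overload chosen by tag
}
```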
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H -#define _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H - -#include <__algorithm/pstl_backend.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>> -__transform(_ExecutionPolicy&&, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result, - _UnaryOperation&& __op) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_transform<_RawPolicy>( - _Backend{}, std::move(__first), std::move(__last), std::move(__result), std::move(__op)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - _UnaryOperation __op) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator); - _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(__op(*__first))); - auto __res = std::__transform(__policy, std::move(__first), std::move(__last), std::move(__result), std::move(__op)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>> -__transform(_ExecutionPolicy&&, - _ForwardIterator1&& __first1, - _ForwardIterator1&& __last1, - _ForwardIterator2&& __first2, - _ForwardOutIterator&& __result, - _BinaryOperation&& __op) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_transform<_RawPolicy>( - _Backend{}, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( - _ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardOutIterator __result, - _BinaryOperation __op) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1); - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2); - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator); - _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(__op(*__first1, *__first2))); - auto __res = std::__transform( - __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H diff --git a/lib/libcxx/include/__algorithm/push_heap.h b/lib/libcxx/include/__algorithm/push_heap.h index 7d8720e3a9..ec0b445f2b 100644 --- 
a/lib/libcxx/include/__algorithm/push_heap.h +++ b/lib/libcxx/include/__algorithm/push_heap.h @@ -14,8 +14,8 @@ #include <__algorithm/iterator_operations.h> #include <__config> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/ranges_adjacent_find.h b/lib/libcxx/include/__algorithm/ranges_adjacent_find.h index a10b04167e..3c54f72331 100644 --- a/lib/libcxx/include/__algorithm/ranges_adjacent_find.h +++ b/lib/libcxx/include/__algorithm/ranges_adjacent_find.h @@ -53,7 +53,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_binary_predicate, projected<_Iter, _Proj>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, _Pred __pred = {}, _Proj __proj = {}) const { return __adjacent_find_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -62,7 +62,7 @@ struct __fn { class _Proj = identity, indirect_binary_predicate, _Proj>, projected, _Proj>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __range, _Pred __pred = {}, _Proj __proj = {}) const { return __adjacent_find_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_all_of.h b/lib/libcxx/include/__algorithm/ranges_all_of.h index 8976541d59..2f603b32f3 100644 --- a/lib/libcxx/include/__algorithm/ranges_all_of.h +++ b/lib/libcxx/include/__algorithm/ranges_all_of.h @@ -45,7 +45,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const { return __all_of_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -53,7 +53,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __all_of_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_any_of.h b/lib/libcxx/include/__algorithm/ranges_any_of.h index 7c775f5f64..205fcecc08 100644 --- a/lib/libcxx/include/__algorithm/ranges_any_of.h +++ b/lib/libcxx/include/__algorithm/ranges_any_of.h @@ -45,7 +45,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred = {}, _Proj __proj = {}) const { return __any_of_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -53,7 +53,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return 
__any_of_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_binary_search.h b/lib/libcxx/include/__algorithm/ranges_binary_search.h index f3b7842d5c..1ef2bd62b5 100644 --- a/lib/libcxx/include/__algorithm/ranges_binary_search.h +++ b/lib/libcxx/include/__algorithm/ranges_binary_search.h @@ -39,7 +39,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__lower_bound<_RangeAlgPolicy>(__first, __last, __value, __comp, __proj); return __ret != __last && !std::invoke(__comp, __value, std::invoke(__proj, *__ret)); @@ -49,7 +49,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __r, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); diff --git a/lib/libcxx/include/__algorithm/ranges_clamp.h b/lib/libcxx/include/__algorithm/ranges_clamp.h index f5ef5fd7f2..e6181ef943 100644 --- a/lib/libcxx/include/__algorithm/ranges_clamp.h +++ b/lib/libcxx/include/__algorithm/ranges_clamp.h @@ -35,7 +35,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr const _Type& operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const _Type& operator()( const _Type& __value, const _Type& __low, const _Type& __high, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( !bool(std::invoke(__comp, std::invoke(__proj, __high), std::invoke(__proj, __low))), diff --git a/lib/libcxx/include/__algorithm/ranges_contains.h b/lib/libcxx/include/__algorithm/ranges_contains.h index 00d0e54019..4836c3baed 100644 --- a/lib/libcxx/include/__algorithm/ranges_contains.h +++ b/lib/libcxx/include/__algorithm/ranges_contains.h @@ -37,14 +37,14 @@ namespace __contains { struct __fn { template _Sent, class _Type, class _Proj = identity> requires indirect_binary_predicate, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool static + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) { return ranges::find(std::move(__first), __last, __value, std::ref(__proj)) != __last; } template requires indirect_binary_predicate, _Proj>, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool static + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static operator()(_Range&& __range, const _Type& __value, _Proj __proj = {}) { return ranges::find(ranges::begin(__range), ranges::end(__range), __value, std::ref(__proj)) != ranges::end(__range); diff --git a/lib/libcxx/include/__algorithm/ranges_contains_subrange.h b/lib/libcxx/include/__algorithm/ranges_contains_subrange.h new file mode 100644 index 0000000000..4398c457fd --- /dev/null +++ b/lib/libcxx/include/__algorithm/ranges_contains_subrange.h @@ -0,0 +1,97 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
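[Editor's note] The new ranges_contains_subrange.h beginning here implements C++23 std::ranges::contains_subrange: an empty needle is trivially contained, and otherwise the answer is whether ranges::search finds a non-empty match. Note the static operator() on the CPO, a C++23 feature these new headers use. A usage sketch, assuming a C++23 standard library:

```cpp
#include <algorithm>
#include <vector>

int main() {
  std::vector<int> haystack{1, 2, 3, 4, 5};
  std::vector<int> needle{3, 4};
  bool found = std::ranges::contains_subrange(haystack, needle);      // true
  bool empty_ok = std::ranges::contains_subrange(haystack,
                                                 std::vector<int>{}); // true
  return (found && empty_ok) ? 0 : 1;
}
```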
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H +#define _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H + +#include <__algorithm/ranges_search.h> +#include <__config> +#include <__functional/identity.h> +#include <__functional/ranges_operations.h> +#include <__functional/reference_wrapper.h> +#include <__iterator/concepts.h> +#include <__iterator/indirectly_comparable.h> +#include <__iterator/projected.h> +#include <__ranges/access.h> +#include <__ranges/concepts.h> +#include <__ranges/size.h> +#include <__ranges/subrange.h> +#include <__utility/move.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +#if _LIBCPP_STD_VER >= 23 + +_LIBCPP_BEGIN_NAMESPACE_STD + +namespace ranges { +namespace __contains_subrange { +struct __fn { + template _Sent1, + forward_iterator _Iter2, + sentinel_for<_Iter2> _Sent2, + class _Pred = ranges::equal_to, + class _Proj1 = identity, + class _Proj2 = identity> + requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static operator()( + _Iter1 __first1, + _Sent1 __last1, + _Iter2 __first2, + _Sent2 __last2, + _Pred __pred = {}, + _Proj1 __proj1 = {}, + _Proj2 __proj2 = {}) { + if (__first2 == __last2) + return true; + + auto __ret = ranges::search( + std::move(__first1), __last1, std::move(__first2), __last2, __pred, std::ref(__proj1), std::ref(__proj2)); + return __ret.empty() == false; + } + + template + requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static + operator()(_Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) { + if constexpr (sized_range<_Range2>) { + if (ranges::size(__range2) == 0) + return true; + } else { + if (ranges::begin(__range2) == ranges::end(__range2)) + return true; + } + + auto __ret = ranges::search(__range1, __range2, __pred, std::ref(__proj1), std::ref(__proj2)); + return __ret.empty() == false; + } +}; +} // namespace __contains_subrange + +inline namespace __cpo { +inline constexpr auto contains_subrange = __contains_subrange::__fn{}; +} // namespace __cpo +} // namespace ranges + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP_STD_VER >= 23 + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H diff --git a/lib/libcxx/include/__algorithm/ranges_count.h b/lib/libcxx/include/__algorithm/ranges_count.h index a8965c1b96..4f35117438 100644 --- a/lib/libcxx/include/__algorithm/ranges_count.h +++ b/lib/libcxx/include/__algorithm/ranges_count.h @@ -38,14 +38,14 @@ namespace __count { struct __fn { template _Sent, class _Type, class _Proj = identity> requires indirect_binary_predicate, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) const { return std::__count<_RangeAlgPolicy>(std::move(__first), std::move(__last), __value, __proj); } template requires indirect_binary_predicate, _Proj>, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> operator()(_Range&& __r, const _Type& __value, _Proj __proj = {}) const { return std::__count<_RangeAlgPolicy>(ranges::begin(__r), ranges::end(__r), __value, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_count_if.h b/lib/libcxx/include/__algorithm/ranges_count_if.h index 71b942dd53..5f2396ff7d 100644 --- a/lib/libcxx/include/__algorithm/ranges_count_if.h +++ b/lib/libcxx/include/__algorithm/ranges_count_if.h @@ -50,7 +50,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Predicate> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> operator()(_Iter __first, _Sent __last, _Predicate __pred, _Proj __proj = {}) const { return ranges::__count_if_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -58,7 +58,7 @@ struct __fn { template , _Proj>> _Predicate> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> operator()(_Range&& __r, _Predicate __pred, _Proj __proj = {}) const { return ranges::__count_if_impl(ranges::begin(__r), ranges::end(__r), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_ends_with.h b/lib/libcxx/include/__algorithm/ranges_ends_with.h index c2a3cae9f3..06efdef36b 100644 --- a/lib/libcxx/include/__algorithm/ranges_ends_with.h +++ b/lib/libcxx/include/__algorithm/ranges_ends_with.h @@ -39,7 +39,7 @@ namespace ranges { namespace __ends_with { struct __fn { template - static _LIBCPP_HIDE_FROM_ABI constexpr bool __ends_with_fn_impl_bidirectional( + _LIBCPP_HIDE_FROM_ABI static constexpr bool __ends_with_fn_impl_bidirectional( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -56,7 +56,7 @@ struct __fn { } template - static _LIBCPP_HIDE_FROM_ABI constexpr bool __ends_with_fn_impl( + _LIBCPP_HIDE_FROM_ABI static constexpr bool __ends_with_fn_impl( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -65,7 +65,7 @@ struct __fn { _Proj1& __proj1, _Proj2& __proj2) { if constexpr (std::bidirectional_iterator<_Sent1> && std::bidirectional_iterator<_Sent2> && - (!std::random_access_iterator<_Sent1>)&&(!std::random_access_iterator<_Sent2>)) { + (!std::random_access_iterator<_Sent1>) && (!std::random_access_iterator<_Sent2>)) { return __ends_with_fn_impl_bidirectional(__first1, __last1, __first2, __last2, __pred, __proj1, __proj2); } else { @@ -133,7 +133,7 @@ struct __fn { requires(forward_iterator<_Iter1> || sized_sentinel_for<_Sent1, _Iter1>) && (forward_iterator<_Iter2> || sized_sentinel_for<_Sent2, _Iter2>) && indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -152,7 +152,7 @@ struct __fn { class _Proj2 = identity> requires(forward_range<_Range1> || sized_range<_Range1>) && (forward_range<_Range2> || sized_range<_Range2>) && indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { if constexpr (sized_range<_Range1> && sized_range<_Range2>) { auto __n1 = 
ranges::size(__range1); diff --git a/lib/libcxx/include/__algorithm/ranges_equal.h b/lib/libcxx/include/__algorithm/ranges_equal.h index 31c7ee261d..edbd0e3641 100644 --- a/lib/libcxx/include/__algorithm/ranges_equal.h +++ b/lib/libcxx/include/__algorithm/ranges_equal.h @@ -44,7 +44,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -74,7 +74,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { if constexpr (sized_range<_Range1> && sized_range<_Range2>) { if (ranges::distance(__range1) != ranges::distance(__range2)) diff --git a/lib/libcxx/include/__algorithm/ranges_equal_range.h b/lib/libcxx/include/__algorithm/ranges_equal_range.h index 4c1c3834ba..4a308e016b 100644 --- a/lib/libcxx/include/__algorithm/ranges_equal_range.h +++ b/lib/libcxx/include/__algorithm/ranges_equal_range.h @@ -46,7 +46,7 @@ struct __fn { class _Tp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, const _Tp& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__equal_range<_RangeAlgPolicy>(std::move(__first), std::move(__last), __value, __comp, __proj); return {std::move(__ret.first), std::move(__ret.second)}; @@ -56,7 +56,7 @@ struct __fn { class _Tp, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, const _Tp& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__equal_range<_RangeAlgPolicy>(ranges::begin(__range), ranges::end(__range), __value, __comp, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_find.h b/lib/libcxx/include/__algorithm/ranges_find.h index 7459fad717..6b0d5efe37 100644 --- a/lib/libcxx/include/__algorithm/ranges_find.h +++ b/lib/libcxx/include/__algorithm/ranges_find.h @@ -44,22 +44,22 @@ struct __fn { if constexpr (forward_iterator<_Iter>) { auto [__first_un, __last_un] = std::__unwrap_range(__first, std::move(__last)); return std::__rewrap_range<_Sent>( - std::move(__first), std::__find_impl(std::move(__first_un), std::move(__last_un), __value, __proj)); + std::move(__first), std::__find(std::move(__first_un), std::move(__last_un), __value, __proj)); } else { - return std::__find_impl(std::move(__first), std::move(__last), __value, __proj); + return std::__find(std::move(__first), std::move(__last), __value, __proj); } } template _Sp, class _Tp, class _Proj = identity> requires indirect_binary_predicate, const _Tp*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, const _Tp& __value, _Proj __proj = {}) const { return 
__find_unwrap(std::move(__first), std::move(__last), __value, __proj); } template requires indirect_binary_predicate, _Proj>, const _Tp*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, const _Tp& __value, _Proj __proj = {}) const { return __find_unwrap(ranges::begin(__r), ranges::end(__r), __value, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_find_end.h b/lib/libcxx/include/__algorithm/ranges_find_end.h index 0bda4f3e1c..e49e66dd4a 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_end.h +++ b/lib/libcxx/include/__algorithm/ranges_find_end.h @@ -45,7 +45,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -72,7 +72,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { auto __ret = std::__find_end_impl<_RangeAlgPolicy>( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_find_first_of.h b/lib/libcxx/include/__algorithm/ranges_find_first_of.h index 63a7b8335f..d92d9686bc 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_first_of.h +++ b/lib/libcxx/include/__algorithm/ranges_find_first_of.h @@ -60,7 +60,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter1 operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter1 operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -78,7 +78,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range1> operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __find_first_of_impl( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_find_if.h b/lib/libcxx/include/__algorithm/ranges_find_if.h index 52ae55ce96..888f9ec3cb 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_if.h +++ b/lib/libcxx/include/__algorithm/ranges_find_if.h @@ -48,13 +48,13 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Pred __pred, _Proj __proj = {}) const { return ranges::__find_if_impl(std::move(__first), std::move(__last), __pred, __proj); } template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> 
operator()(_Rp&& __r, _Pred __pred, _Proj __proj = {}) const { return ranges::__find_if_impl(ranges::begin(__r), ranges::end(__r), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_find_if_not.h b/lib/libcxx/include/__algorithm/ranges_find_if_not.h index 60c6796cbb..ec19545b5a 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_if_not.h +++ b/lib/libcxx/include/__algorithm/ranges_find_if_not.h @@ -40,14 +40,14 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Pred __pred, _Proj __proj = {}) const { auto __pred2 = [&](auto&& __e) -> bool { return !std::invoke(__pred, std::forward(__e)); }; return ranges::__find_if_impl(std::move(__first), std::move(__last), __pred2, __proj); } template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Pred __pred, _Proj __proj = {}) const { auto __pred2 = [&](auto&& __e) -> bool { return !std::invoke(__pred, std::forward(__e)); }; return ranges::__find_if_impl(ranges::begin(__r), ranges::end(__r), __pred2, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_find_last.h b/lib/libcxx/include/__algorithm/ranges_find_last.h new file mode 100644 index 0000000000..95f7e77b8c --- /dev/null +++ b/lib/libcxx/include/__algorithm/ranges_find_last.h @@ -0,0 +1,175 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
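[Editor's note] The file added starting here, ranges_find_last.h, provides C++23 ranges::find_last, find_last_if, and find_last_if_not. Unlike find, each returns a subrange running from the last match to the end (empty and positioned at the end when there is no match); the implementation below walks backwards for bidirectional iterators and otherwise remembers the last hit in one forward pass. Usage sketch (C++23):

```cpp
#include <algorithm>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3, 2, 1};
  auto hit = std::ranges::find_last(v, 2);  // subrange over {2, 1}
  bool ok = (hit.begin() == v.begin() + 3);
  auto miss = std::ranges::find_last(v, 9); // empty subrange at the end
  return (ok && miss.empty()) ? 0 : 1;
}
```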
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_RANGES_FIND_LAST_H +#define _LIBCPP___ALGORITHM_RANGES_FIND_LAST_H + +#include <__config> +#include <__functional/identity.h> +#include <__functional/invoke.h> +#include <__functional/ranges_operations.h> +#include <__iterator/concepts.h> +#include <__iterator/indirectly_comparable.h> +#include <__iterator/next.h> +#include <__iterator/prev.h> +#include <__iterator/projected.h> +#include <__ranges/access.h> +#include <__ranges/concepts.h> +#include <__ranges/subrange.h> +#include <__utility/move.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +#if _LIBCPP_STD_VER >= 23 + +_LIBCPP_BEGIN_NAMESPACE_STD + +namespace ranges { + +template +_LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> +__find_last_impl(_Iter __first, _Sent __last, _Pred __pred, _Proj& __proj) { + if (__first == __last) { + return subrange<_Iter>(__first, __first); + } + + if constexpr (bidirectional_iterator<_Iter>) { + auto __last_it = ranges::next(__first, __last); + for (auto __it = ranges::prev(__last_it); __it != __first; --__it) { + if (__pred(std::invoke(__proj, *__it))) { + return subrange<_Iter>(std::move(__it), std::move(__last_it)); + } + } + if (__pred(std::invoke(__proj, *__first))) { + return subrange<_Iter>(std::move(__first), std::move(__last_it)); + } + return subrange<_Iter>(__last_it, __last_it); + } else { + bool __found = false; + _Iter __found_it; + for (; __first != __last; ++__first) { + if (__pred(std::invoke(__proj, *__first))) { + __found = true; + __found_it = __first; + } + } + + if (__found) { + return subrange<_Iter>(std::move(__found_it), std::move(__first)); + } else { + return subrange<_Iter>(__first, __first); + } + } +} + +namespace __find_last { +struct __fn { + template + struct __op { + const _Type& __value; + template + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator()(_Elem&& __elem) const { + return std::forward<_Elem>(__elem) == __value; + } + }; + + template _Sent, class _Type, class _Proj = identity> + requires indirect_binary_predicate, const _Type*> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static subrange<_Iter> + operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) { + return ranges::__find_last_impl(std::move(__first), std::move(__last), __op<_Type>{__value}, __proj); + } + + template + requires indirect_binary_predicate, _Proj>, const _Type*> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static borrowed_subrange_t<_Range> + operator()(_Range&& __range, const _Type& __value, _Proj __proj = {}) { + return ranges::__find_last_impl(ranges::begin(__range), ranges::end(__range), __op<_Type>{__value}, __proj); + } +}; +} // namespace __find_last + +namespace __find_last_if { +struct __fn { + template + struct __op { + _Pred& __pred; + template + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator()(_Elem&& __elem) const { + return std::invoke(__pred, std::forward<_Elem>(__elem)); + } + }; + + template _Sent, + class _Proj = identity, + indirect_unary_predicate> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static subrange<_Iter> + operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(std::move(__first), std::move(__last), __op<_Pred>{__pred}, __proj); + } + + template , _Proj>> _Pred> + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI constexpr static borrowed_subrange_t<_Range> + operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(ranges::begin(__range), ranges::end(__range), __op<_Pred>{__pred}, __proj); + } +}; +} // namespace __find_last_if + +namespace __find_last_if_not { +struct __fn { + template + struct __op { + _Pred& __pred; + template + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator()(_Elem&& __elem) const { + return !std::invoke(__pred, std::forward<_Elem>(__elem)); + } + }; + + template _Sent, + class _Proj = identity, + indirect_unary_predicate> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static subrange<_Iter> + operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(std::move(__first), std::move(__last), __op<_Pred>{__pred}, __proj); + } + + template , _Proj>> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static borrowed_subrange_t<_Range> + operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(ranges::begin(__range), ranges::end(__range), __op<_Pred>{__pred}, __proj); + } +}; +} // namespace __find_last_if_not + +inline namespace __cpo { +inline constexpr auto find_last = __find_last::__fn{}; +inline constexpr auto find_last_if = __find_last_if::__fn{}; +inline constexpr auto find_last_if_not = __find_last_if_not::__fn{}; +} // namespace __cpo +} // namespace ranges + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP_STD_VER >= 23 + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_RANGES_FIND_LAST_H diff --git a/lib/libcxx/include/__algorithm/ranges_includes.h b/lib/libcxx/include/__algorithm/ranges_includes.h index 0bc4c043bd..c4c3b8ed08 100644 --- a/lib/libcxx/include/__algorithm/ranges_includes.h +++ b/lib/libcxx/include/__algorithm/ranges_includes.h @@ -45,7 +45,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity, indirect_strict_weak_order, projected<_Iter2, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -69,7 +69,7 @@ struct __fn { class _Proj2 = identity, indirect_strict_weak_order, _Proj1>, projected, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Comp __comp = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return std::__includes( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_is_heap.h b/lib/libcxx/include/__algorithm/ranges_is_heap.h index 122368c90d..3d9e18ce1d 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_heap.h +++ b/lib/libcxx/include/__algorithm/ranges_is_heap.h @@ -51,7 +51,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return __is_heap_fn_impl(std::move(__first), std::move(__last), __comp, __proj); } @@ -59,7 +59,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Comp __comp = {}, 
_Proj __proj = {}) const { return __is_heap_fn_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_is_heap_until.h b/lib/libcxx/include/__algorithm/ranges_is_heap_until.h index b2705d37a6..7a2e1fc770 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_heap_until.h +++ b/lib/libcxx/include/__algorithm/ranges_is_heap_until.h @@ -51,7 +51,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return __is_heap_until_fn_impl(std::move(__first), std::move(__last), __comp, __proj); } @@ -59,7 +59,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { return __is_heap_until_fn_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_is_partitioned.h b/lib/libcxx/include/__algorithm/ranges_is_partitioned.h index c6a585c9f5..5be6fba46f 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_partitioned.h +++ b/lib/libcxx/include/__algorithm/ranges_is_partitioned.h @@ -57,7 +57,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const { return __is_partitioned_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -65,7 +65,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __is_partitioned_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_is_permutation.h b/lib/libcxx/include/__algorithm/ranges_is_permutation.h index e0423d722b..1f8d67007a 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_permutation.h +++ b/lib/libcxx/include/__algorithm/ranges_is_permutation.h @@ -56,7 +56,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity, indirect_equivalence_relation, projected<_Iter2, _Proj2>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -74,7 +74,7 @@ struct __fn { class _Proj2 = identity, indirect_equivalence_relation, _Proj1>, projected, _Proj2>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { if constexpr (sized_range<_Range1> && sized_range<_Range2>) { if (ranges::distance(__range1) != ranges::distance(__range2)) diff --git a/lib/libcxx/include/__algorithm/ranges_is_sorted.h b/lib/libcxx/include/__algorithm/ranges_is_sorted.h index d71035d5aa..5b88d422b4 100644 --- 
a/lib/libcxx/include/__algorithm/ranges_is_sorted.h +++ b/lib/libcxx/include/__algorithm/ranges_is_sorted.h @@ -37,7 +37,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__is_sorted_until_impl(std::move(__first), __last, __comp, __proj) == __last; } @@ -45,7 +45,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { auto __last = ranges::end(__range); return ranges::__is_sorted_until_impl(ranges::begin(__range), __last, __comp, __proj) == __last; diff --git a/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h b/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h index dcfb6a4e18..54de530c8b 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h +++ b/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h @@ -53,7 +53,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__is_sorted_until_impl(std::move(__first), std::move(__last), __comp, __proj); } @@ -61,7 +61,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__is_sorted_until_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h b/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h index 90e96b5465..6d82017e30 100644 --- a/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h +++ b/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h @@ -60,7 +60,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity, indirect_strict_weak_order, projected<_Iter2, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -78,7 +78,7 @@ struct __fn { class _Proj2 = identity, indirect_strict_weak_order, _Proj1>, projected, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Comp __comp = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __lexicographical_compare_impl( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_lower_bound.h b/lib/libcxx/include/__algorithm/ranges_lower_bound.h index ab1f80e7ab..0651147e04 100644 --- a/lib/libcxx/include/__algorithm/ranges_lower_bound.h +++ b/lib/libcxx/include/__algorithm/ranges_lower_bound.h @@ -43,7 +43,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order> _Comp = 
ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { return std::__lower_bound<_RangeAlgPolicy>(__first, __last, __value, __comp, __proj); } @@ -52,7 +52,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __r, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { return std::__lower_bound<_RangeAlgPolicy>(ranges::begin(__r), ranges::end(__r), __value, __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_max.h b/lib/libcxx/include/__algorithm/ranges_max.h index 0f89cb2ff5..d0ee6f314b 100644 --- a/lib/libcxx/include/__algorithm/ranges_max.h +++ b/lib/libcxx/include/__algorithm/ranges_max.h @@ -41,7 +41,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& operator()(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Comp __comp = {}, @@ -52,7 +52,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp operator()(initializer_list<_Tp> __il, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __il.begin() != __il.end(), "initializer_list must contain at least one element"); @@ -65,7 +65,7 @@ struct __fn { class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> requires indirectly_copyable_storable, range_value_t<_Rp>*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); @@ -98,6 +98,6 @@ _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS -#endif // _LIBCPP_STD_VER >= 20 && +#endif // _LIBCPP_STD_VER >= 20 #endif // _LIBCPP___ALGORITHM_RANGES_MAX_H diff --git a/lib/libcxx/include/__algorithm/ranges_max_element.h b/lib/libcxx/include/__algorithm/ranges_max_element.h index 83adf49b61..c577309271 100644 --- a/lib/libcxx/include/__algorithm/ranges_max_element.h +++ b/lib/libcxx/include/__algorithm/ranges_max_element.h @@ -38,7 +38,7 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, __lhs); }; return ranges::__min_element_impl(__first, __last, __comp_lhs_rhs_swapped, __proj); @@ -47,7 +47,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, 
__lhs); }; return ranges::__min_element_impl(ranges::begin(__r), ranges::end(__r), __comp_lhs_rhs_swapped, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_min.h b/lib/libcxx/include/__algorithm/ranges_min.h index 8757358cdf..cc569d2a06 100644 --- a/lib/libcxx/include/__algorithm/ranges_min.h +++ b/lib/libcxx/include/__algorithm/ranges_min.h @@ -40,7 +40,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& operator()(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Comp __comp = {}, @@ -51,7 +51,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp operator()(initializer_list<_Tp> __il, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __il.begin() != __il.end(), "initializer_list must contain at least one element"); @@ -62,7 +62,7 @@ struct __fn { class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> requires indirectly_copyable_storable, range_value_t<_Rp>*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); @@ -90,6 +90,6 @@ _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS -#endif // _LIBCPP_STD_VER >= 20 && +#endif // _LIBCPP_STD_VER >= 20 #endif // _LIBCPP___ALGORITHM_RANGES_MIN_H diff --git a/lib/libcxx/include/__algorithm/ranges_min_element.h b/lib/libcxx/include/__algorithm/ranges_min_element.h index 4b9cb76da5..588ef258e2 100644 --- a/lib/libcxx/include/__algorithm/ranges_min_element.h +++ b/lib/libcxx/include/__algorithm/ranges_min_element.h @@ -52,7 +52,7 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__min_element_impl(__first, __last, __comp, __proj); } @@ -60,7 +60,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__min_element_impl(ranges::begin(__r), ranges::end(__r), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_minmax.h b/lib/libcxx/include/__algorithm/ranges_minmax.h index 22a62b620c..09cbefd91a 100644 --- a/lib/libcxx/include/__algorithm/ranges_minmax.h +++ b/lib/libcxx/include/__algorithm/ranges_minmax.h @@ -23,7 +23,9 @@ #include <__iterator/projected.h> #include <__ranges/access.h> #include <__ranges/concepts.h> +#include <__type_traits/desugars_to.h> #include <__type_traits/is_reference.h> +#include <__type_traits/is_trivially_copyable.h> #include <__type_traits/remove_cvref.h> #include <__utility/forward.h> #include <__utility/move.h> @@ -50,7 +52,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result operator()(_LIBCPP_LIFETIMEBOUND const 
_Type& __a, _LIBCPP_LIFETIMEBOUND const _Type& __b, _Comp __comp = {}, @@ -63,7 +65,7 @@ struct __fn { template <copyable _Type, class _Proj = identity, indirect_strict_weak_order<projected<const _Type*, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result<_Type> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result<_Type> operator()(initializer_list<_Type> __il, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __il.begin() != __il.end(), "initializer_list has to contain at least one element"); @@ -75,7 +77,7 @@ struct __fn { class _Proj = identity, indirect_strict_weak_order<projected<iterator_t<_Range>, _Proj>> _Comp = ranges::less> requires indirectly_copyable_storable<iterator_t<_Range>, range_value_t<_Range>*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result<range_value_t<_Range>> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result<range_value_t<_Range>> operator()(_Range&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); @@ -83,7 +85,20 @@ struct __fn { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__first != __last, "range has to contain at least one element"); - if constexpr (forward_range<_Range>) { + // This optimization is not in minmax_element because clang doesn't see through the pointers and as a result doesn't + // vectorize the code. + if constexpr (contiguous_range<_Range> && is_integral_v<_ValueT> && + __is_cheap_to_copy<_ValueT> & __is_identity<_Proj>::value && + __desugars_to_v<__less_tag, _Comp, _ValueT, _ValueT>) { + minmax_result<_ValueT> __result = {__r[0], __r[0]}; + for (auto __e : __r) { + if (__e < __result.min) + __result.min = __e; + if (__result.max < __e) + __result.max = __e; + } + return __result; + } else if constexpr (forward_range<_Range>) { // Special-case the one element case. Avoid repeatedly initializing objects from the result of an iterator // dereference when doing so might not be idempotent. The `if constexpr` avoids the extra branch in cases where // it's not needed. 
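The ranges::minmax hunk above adds a fast path for contiguous ranges of cheaply copyable integers under the default projection and a less-than comparator: a single pass that keeps a running min and max with two comparisons per element, written over values rather than iterators so clang can vectorize it. A minimal standalone sketch of the same idea, using illustrative names rather than the libc++ internals:

#include <cassert>
#include <vector>

struct MinMaxResult {
  int min;
  int max;
};

// One pass, two comparisons per element, no iterator indirection: the loop
// body is branch-light and should auto-vectorize on current clang at -O2.
MinMaxResult minmax_fast(const std::vector<int>& r) {
  assert(!r.empty()); // mirrors the "at least one element" hardening assertion
  MinMaxResult result{r[0], r[0]};
  for (int e : r) {
    if (e < result.min)
      result.min = e;
    if (result.max < e)
      result.max = e;
  }
  return result;
}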
diff --git a/lib/libcxx/include/__algorithm/ranges_minmax_element.h b/lib/libcxx/include/__algorithm/ranges_minmax_element.h index 5132856ebc..4bf6d2404e 100644 --- a/lib/libcxx/include/__algorithm/ranges_minmax_element.h +++ b/lib/libcxx/include/__algorithm/ranges_minmax_element.h @@ -46,7 +46,7 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result<_Ip> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result<_Ip> operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__minmax_element_impl(std::move(__first), std::move(__last), __comp, __proj); return {__ret.first, __ret.second}; @@ -55,7 +55,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__minmax_element_impl(ranges::begin(__r), ranges::end(__r), __comp, __proj); return {__ret.first, __ret.second}; diff --git a/lib/libcxx/include/__algorithm/ranges_mismatch.h b/lib/libcxx/include/__algorithm/ranges_mismatch.h index 037af39126..c4bf0022a9 100644 --- a/lib/libcxx/include/__algorithm/ranges_mismatch.h +++ b/lib/libcxx/include/__algorithm/ranges_mismatch.h @@ -10,6 +10,8 @@ #define _LIBCPP___ALGORITHM_RANGES_MISMATCH_H #include <__algorithm/in_in_result.h> +#include <__algorithm/mismatch.h> +#include <__algorithm/unwrap_range.h> #include <__config> #include <__functional/identity.h> #include <__functional/invoke.h> @@ -42,13 +44,17 @@ struct __fn { template static _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result<_I1, _I2> __go(_I1 __first1, _S1 __last1, _I2 __first2, _S2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { - while (__first1 != __last1 && __first2 != __last2) { - if (!std::invoke(__pred, std::invoke(__proj1, *__first1), std::invoke(__proj2, *__first2))) - break; - ++__first1; - ++__first2; + if constexpr (forward_iterator<_I1> && forward_iterator<_I2>) { + auto __range1 = std::__unwrap_range(__first1, __last1); + auto __range2 = std::__unwrap_range(__first2, __last2); + auto __res = + std::__mismatch(__range1.first, __range1.second, __range2.first, __range2.second, __pred, __proj1, __proj2); + return {std::__rewrap_range<_S1>(__first1, __res.first), std::__rewrap_range<_S2>(__first2, __res.second)}; + } else { + auto __res = std::__mismatch( + std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), __pred, __proj1, __proj2); + return {std::move(__res.first), std::move(__res.second)}; } - return {std::move(__first1), std::move(__first2)}; } template requires indirectly_comparable<_I1, _I2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result<_I1, _I2> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result<_I1, _I2> operator()( _I1 __first1, _S1 __last1, _I2 __first2, _S2 __last2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __go(std::move(__first1), __last1, std::move(__first2), __last2, __pred, __proj1, __proj2); @@ -71,8 +77,8 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_R2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr 
mismatch_result, - borrowed_iterator_t<_R2>> + [[nodiscard]] + _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result, borrowed_iterator_t<_R2>> operator()(_R1&& __r1, _R2&& __r2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __go( ranges::begin(__r1), ranges::end(__r1), ranges::begin(__r2), ranges::end(__r2), __pred, __proj1, __proj2); diff --git a/lib/libcxx/include/__algorithm/ranges_none_of.h b/lib/libcxx/include/__algorithm/ranges_none_of.h index 59bd87997d..7df3c1829f 100644 --- a/lib/libcxx/include/__algorithm/ranges_none_of.h +++ b/lib/libcxx/include/__algorithm/ranges_none_of.h @@ -46,7 +46,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred = {}, _Proj __proj = {}) const { return __none_of_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -54,7 +54,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __none_of_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_remove.h b/lib/libcxx/include/__algorithm/ranges_remove.h index 315bed8fba..17c3a2c5cd 100644 --- a/lib/libcxx/include/__algorithm/ranges_remove.h +++ b/lib/libcxx/include/__algorithm/ranges_remove.h @@ -37,7 +37,7 @@ namespace __remove { struct __fn { template _Sent, class _Type, class _Proj = identity> requires indirect_binary_predicate, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) const { auto __pred = [&](auto&& __other) -> bool { return __value == __other; }; return ranges::__remove_if_impl(std::move(__first), std::move(__last), __pred, __proj); @@ -46,7 +46,7 @@ struct __fn { template requires permutable> && indirect_binary_predicate, _Proj>, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, const _Type& __value, _Proj __proj = {}) const { auto __pred = [&](auto&& __other) -> bool { return __value == __other; }; return ranges::__remove_if_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_remove_if.h b/lib/libcxx/include/__algorithm/ranges_remove_if.h index 943dbdd738..0ea5d9a01b 100644 --- a/lib/libcxx/include/__algorithm/ranges_remove_if.h +++ b/lib/libcxx/include/__algorithm/ranges_remove_if.h @@ -59,7 +59,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const { return ranges::__remove_if_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -68,7 +68,7 @@ struct __fn { class _Proj = identity, indirect_unary_predicate, _Proj>> _Pred> requires permutable> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + 
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return ranges::__remove_if_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_search.h b/lib/libcxx/include/__algorithm/ranges_search.h index ca2326e9ab..55294c6063 100644 --- a/lib/libcxx/include/__algorithm/ranges_search.h +++ b/lib/libcxx/include/__algorithm/ranges_search.h @@ -77,7 +77,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -94,7 +94,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { auto __first1 = ranges::begin(__range1); if constexpr (sized_range<_Range2>) { diff --git a/lib/libcxx/include/__algorithm/ranges_search_n.h b/lib/libcxx/include/__algorithm/ranges_search_n.h index 4c1d73d8e6..56e12755b9 100644 --- a/lib/libcxx/include/__algorithm/ranges_search_n.h +++ b/lib/libcxx/include/__algorithm/ranges_search_n.h @@ -71,7 +71,7 @@ struct __fn { class _Pred = ranges::equal_to, class _Proj = identity> requires indirectly_comparable<_Iter, const _Type*, _Pred, _Proj> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, iter_difference_t<_Iter> __count, @@ -83,7 +83,7 @@ struct __fn { template requires indirectly_comparable, const _Type*, _Pred, _Proj> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()( _Range&& __range, range_difference_t<_Range> __count, const _Type& __value, _Pred __pred = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__range); diff --git a/lib/libcxx/include/__algorithm/ranges_starts_with.h b/lib/libcxx/include/__algorithm/ranges_starts_with.h index 90e184aa9b..17084e4f24 100644 --- a/lib/libcxx/include/__algorithm/ranges_starts_with.h +++ b/lib/libcxx/include/__algorithm/ranges_starts_with.h @@ -42,14 +42,14 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred __pred = {}, _Proj1 __proj1 = {}, - _Proj2 __proj2 = {}) const { + _Proj2 __proj2 = {}) { return __mismatch::__fn::__go( std::move(__first1), std::move(__last1), @@ -67,8 +67,8 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( - _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, 
_Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool + operator()(_Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) { return __mismatch::__fn::__go( ranges::begin(__range1), ranges::end(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_unique.h b/lib/libcxx/include/__algorithm/ranges_unique.h index 7340310eb3..7a9b784321 100644 --- a/lib/libcxx/include/__algorithm/ranges_unique.h +++ b/lib/libcxx/include/__algorithm/ranges_unique.h @@ -47,7 +47,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_equivalence_relation> _Comp = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__unique<_RangeAlgPolicy>(std::move(__first), std::move(__last), std::__make_projected(__comp, __proj)); @@ -58,7 +58,7 @@ struct __fn { class _Proj = identity, indirect_equivalence_relation, _Proj>> _Comp = ranges::equal_to> requires permutable> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__unique<_RangeAlgPolicy>( ranges::begin(__range), ranges::end(__range), std::__make_projected(__comp, __proj)); diff --git a/lib/libcxx/include/__algorithm/ranges_upper_bound.h b/lib/libcxx/include/__algorithm/ranges_upper_bound.h index 7b571fb344..fa6fa7f70e 100644 --- a/lib/libcxx/include/__algorithm/ranges_upper_bound.h +++ b/lib/libcxx/include/__algorithm/ranges_upper_bound.h @@ -37,7 +37,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](const auto& __lhs, const auto& __rhs) -> bool { return !std::invoke(__comp, __rhs, __lhs); @@ -50,7 +50,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __r, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](const auto& __lhs, const auto& __rhs) -> bool { return !std::invoke(__comp, __rhs, __lhs); diff --git a/lib/libcxx/include/__algorithm/remove.h b/lib/libcxx/include/__algorithm/remove.h index 1498852c43..fd01c23cb6 100644 --- a/lib/libcxx/include/__algorithm/remove.h +++ b/lib/libcxx/include/__algorithm/remove.h @@ -24,7 +24,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator remove(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { __first = std::find(__first, __last, __value); if (__first != __last) { diff --git a/lib/libcxx/include/__algorithm/remove_if.h b/lib/libcxx/include/__algorithm/remove_if.h index c77b78023f..b14f3c0efa 
100644 --- a/lib/libcxx/include/__algorithm/remove_if.h +++ b/lib/libcxx/include/__algorithm/remove_if.h @@ -23,7 +23,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator remove_if(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { __first = std::find_if<_ForwardIterator, _Predicate&>(__first, __last, __pred); if (__first != __last) { diff --git a/lib/libcxx/include/__algorithm/rotate.h b/lib/libcxx/include/__algorithm/rotate.h index 9a4d07883e..df4ca95aac 100644 --- a/lib/libcxx/include/__algorithm/rotate.h +++ b/lib/libcxx/include/__algorithm/rotate.h @@ -15,7 +15,7 @@ #include <__algorithm/swap_ranges.h> #include <__config> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_trivially_move_assignable.h> +#include <__type_traits/is_trivially_assignable.h> #include <__utility/move.h> #include <__utility/pair.h> diff --git a/lib/libcxx/include/__algorithm/search.h b/lib/libcxx/include/__algorithm/search.h index 75f936d0f2..b82ca78095 100644 --- a/lib/libcxx/include/__algorithm/search.h +++ b/lib/libcxx/include/__algorithm/search.h @@ -117,17 +117,18 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __searc } } -template +template ::value && + __has_random_access_iterator_category<_Iter2>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_impl( - _Iter1 __first1, - _Sent1 __last1, - _Iter2 __first2, - _Sent2 __last2, - _Pred& __pred, - _Proj1& __proj1, - _Proj2& __proj2, - __enable_if_t<__has_random_access_iterator_category<_Iter1>::value && - __has_random_access_iterator_category<_Iter2>::value>* = nullptr) { + _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { auto __size2 = __last2 - __first2; if (__size2 == 0) return std::make_pair(__first1, __first1); @@ -141,23 +142,25 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __searc __first1, __last1, __first2, __last2, __pred, __proj1, __proj2, __size1, __size2); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_impl( - _Iter1 __first1, - _Sent1 __last1, - _Iter2 __first2, - _Sent2 __last2, - _Pred& __pred, - _Proj1& __proj1, - _Proj2& __proj2, +template < + class _Iter1, + class _Sent1, + class _Iter2, + class _Sent2, + class _Pred, + class _Proj1, + class _Proj2, __enable_if_t<__has_forward_iterator_category<_Iter1>::value && __has_forward_iterator_category<_Iter2>::value && - !(__has_random_access_iterator_category<_Iter1>::value && - __has_random_access_iterator_category<_Iter2>::value)>* = nullptr) { + !(__has_random_access_iterator_category<_Iter1>::value && + __has_random_access_iterator_category<_Iter2>::value), + int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_impl( + _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { return std::__search_forward_impl<_ClassicAlgPolicy>(__first1, __last1, __first2, __last2, __pred, __proj1, __proj2); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 search(_ForwardIterator1 __first1, _ForwardIterator1 
__last1, _ForwardIterator2 __first2, @@ -170,14 +173,14 @@ search(_ForwardIterator1 __first1, } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 search(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::search(__first1, __last1, __first2, __last2, __equal_to()); } #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search(_ForwardIterator __f, _ForwardIterator __l, const _Searcher& __s) { return __s(__f, __l).first; } diff --git a/lib/libcxx/include/__algorithm/search_n.h b/lib/libcxx/include/__algorithm/search_n.h index c3c01e700b..771647d316 100644 --- a/lib/libcxx/include/__algorithm/search_n.h +++ b/lib/libcxx/include/__algorithm/search_n.h @@ -108,34 +108,35 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 std::pair<_Iter, _Iter> __se } } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter, _Iter> __search_n_impl( - _Iter __first, - _Sent __last, - _DiffT __count, - const _Type& __value, - _Pred& __pred, - _Proj& __proj, - __enable_if_t<__has_random_access_iterator_category<_Iter>::value>* = nullptr) { +template ::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter, _Iter> +__search_n_impl(_Iter __first, _Sent __last, _DiffT __count, const _Type& __value, _Pred& __pred, _Proj& __proj) { return std::__search_n_random_access_impl<_ClassicAlgPolicy>( __first, __last, __count, __value, __pred, __proj, __last - __first); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_n_impl( - _Iter1 __first, - _Sent1 __last, - _DiffT __count, - const _Type& __value, - _Pred& __pred, - _Proj& __proj, - __enable_if_t<__has_forward_iterator_category<_Iter1>::value && - !__has_random_access_iterator_category<_Iter1>::value>* = nullptr) { +template ::value && + !__has_random_access_iterator_category<_Iter1>::value, + int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> +__search_n_impl(_Iter1 __first, _Sent1 __last, _DiffT __count, const _Type& __value, _Pred& __pred, _Proj& __proj) { return std::__search_n_forward_impl<_ClassicAlgPolicy>(__first, __last, __count, __value, __pred, __proj); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search_n( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search_n( _ForwardIterator __first, _ForwardIterator __last, _Size __count, const _Tp& __value, _BinaryPredicate __pred) { static_assert( __is_callable<_BinaryPredicate, decltype(*__first), const _Tp&>::value, "BinaryPredicate has to be callable"); @@ -144,7 +145,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search_n(_ForwardIterator __first, _ForwardIterator __last, _Size __count, const _Tp& __value) { return std::search_n(__first, __last, std::__convert_to_integral(__count), __value, __equal_to()); } diff --git 
a/lib/libcxx/include/__algorithm/set_intersection.h b/lib/libcxx/include/__algorithm/set_intersection.h index 73d888d1b0..bb0d86cd0f 100644 --- a/lib/libcxx/include/__algorithm/set_intersection.h +++ b/lib/libcxx/include/__algorithm/set_intersection.h @@ -12,10 +12,15 @@ #include <__algorithm/comp.h> #include <__algorithm/comp_ref_type.h> #include <__algorithm/iterator_operations.h> +#include <__algorithm/lower_bound.h> #include <__config> +#include <__functional/identity.h> #include <__iterator/iterator_traits.h> #include <__iterator/next.h> +#include <__type_traits/is_same.h> +#include <__utility/exchange.h> #include <__utility/move.h> +#include <__utility/swap.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -38,10 +43,103 @@ struct __set_intersection_result { : __in1_(std::move(__in_iter1)), __in2_(std::move(__in_iter2)), __out_(std::move(__out_iter)) {} }; -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InIter1, _InIter2, _OutIter> +// Helper for __set_intersection() with one-sided binary search: populate result and advance input iterators if they +// are found to potentially contain the same value in two consecutive calls. This function is very intimately related to +// the way it is used and doesn't attempt to abstract that, it's not appropriate for general usage outside of its +// context. +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __set_intersection_add_output_if_equal( + bool __may_be_equal, + _InForwardIter1& __first1, + _InForwardIter2& __first2, + _OutIter& __result, + bool& __prev_may_be_equal) { + if (__may_be_equal && __prev_may_be_equal) { + *__result = *__first1; + ++__result; + ++__first1; + ++__first2; + __prev_may_be_equal = false; + } else { + __prev_may_be_equal = __may_be_equal; + } +} + +// With forward iterators we can make multiple passes over the data, allowing the use of one-sided binary search to +// reduce best-case complexity to log(N). Understanding how we can use binary search and still respect complexity +// guarantees is _not_ straightforward: the guarantee is "at most 2*(N+M)-1 comparisons", and one-sided binary search +// will necessarily overshoot depending on the position of the needle in the haystack -- for instance, if we're +// searching for 3 in (1, 2, 3, 4), we'll check if 3<1, then 3<2, then 3<4, and, finally, 3<3, for a total of 4 +// comparisons, when linear search would have yielded 3. However, because we won't need to perform the intervening +// reciprocal comparisons (ie 1<3, 2<3, 4<3), that extra comparison doesn't run afoul of the guarantee. Additionally, +// this type of scenario can only happen for match distances of up to 5 elements, because 2*log2(8) is 6, and we'll +// still be worse-off at position 5 of an 8-element set. From then onwards these scenarios can't happen. TL;DR: we'll be +// 1 comparison worse-off compared to the classic linear-searching algorithm if matching position 3 of a set with 4 +// elements, or position 5 if the set has 7 or 8 elements, but we'll never exceed the complexity guarantees from the +// standard. 
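The complexity argument in that comment is easier to follow with the shape of a one-sided lower bound written out. Below is a sketch over raw pointers with simplified names; the real helper is std::__lower_bound_onesided from <__algorithm/lower_bound.h>, which this file now includes:

#include <algorithm>
#include <cstddef>

// Probe positions 1, 2, 4, 8, ... from the front until we meet an element
// that is not less than value, then binary-search only the last window.
const int* lower_bound_onesided(const int* first, const int* last, int value) {
  std::size_t n = static_cast<std::size_t>(last - first);
  if (n == 0 || !(*first < value))
    return first; // the answer is at the very front
  std::size_t i = 1;
  while (i < n && first[i] < value)
    i *= 2;
  // Every element up to and including first[i / 2] is < value, so the answer
  // lies in (i / 2, min(i, n)].
  return std::lower_bound(first + i / 2 + 1, first + std::min(i, n), value);
}

The doubling probe is what makes the best case logarithmic when the needle sits near the front, which is exactly the situation the alternating two-pointer loop below creates.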
+template <class _AlgPolicy, class _Compare, class _InForwardIter1, class _Sent1, class _InForwardIter2, class _Sent2, class _OutIter> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InForwardIter1, _InForwardIter2, _OutIter> __set_intersection( + _InForwardIter1 __first1, + _Sent1 __last1, + _InForwardIter2 __first2, + _Sent2 __last2, + _OutIter __result, + _Compare&& __comp, + std::forward_iterator_tag, + std::forward_iterator_tag) { + _LIBCPP_CONSTEXPR std::__identity __proj; + bool __prev_may_be_equal = false; + + while (__first2 != __last2) { + _InForwardIter1 __first1_next = + std::__lower_bound_onesided<_AlgPolicy>(__first1, __last1, *__first2, __comp, __proj); + std::swap(__first1_next, __first1); + // keeping in mind that a==b iff !(a<b) && !(b<a) + std::__set_intersection_add_output_if_equal( + __first1 == __first1_next, __first1, __first2, __result, __prev_may_be_equal); + + if (__first1 == __last1) + break; + + _InForwardIter2 __first2_next = + std::__lower_bound_onesided<_AlgPolicy>(__first2, __last2, *__first1, __comp, __proj); + std::swap(__first2_next, __first2); + std::__set_intersection_add_output_if_equal( + __first2 == __first2_next, __first1, __first2, __result, __prev_may_be_equal); + } + return __set_intersection_result<_InForwardIter1, _InForwardIter2, _OutIter>( + _IterOps<_AlgPolicy>::next(std::move(__first1), std::move(__last1)), + _IterOps<_AlgPolicy>::next(std::move(__first2), std::move(__last2)), + std::move(__result)); +} + +// input iterators are not suitable for multipass algorithms, so we stick to the classic single-pass version +template <class _AlgPolicy, class _Compare, class _InInputIter1, class _Sent1, class _InInputIter2, class _Sent2, class _OutIter> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InInputIter1, _InInputIter2, _OutIter> +__set_intersection( + _InInputIter1 __first1, + _Sent1 __last1, + _InInputIter2 __first2, + _Sent2 __last2, + _OutIter __result, + _Compare&& __comp, + std::input_iterator_tag, + std::input_iterator_tag) { while (__first1 != __last1 && __first2 != __last2) { if (__comp(*__first1, *__first2)) ++__first1; @@ -55,12 +153,28 @@ __set_intersection( } } - return __set_intersection_result<_InIter1, _InIter2, _OutIter>( + return __set_intersection_result<_InInputIter1, _InInputIter2, _OutIter>( _IterOps<_AlgPolicy>::next(std::move(__first1), std::move(__last1)), _IterOps<_AlgPolicy>::next(std::move(__first2), std::move(__last2)), std::move(__result)); } +template <class _AlgPolicy, class _Compare, class _InIter1, class _Sent1, class _InIter2, class _Sent2, class _OutIter> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InIter1, _InIter2, _OutIter> +__set_intersection( + _InIter1 __first1, _Sent1 __last1, _InIter2 __first2, _Sent2 __last2, _OutIter __result, _Compare&& __comp) { + return std::__set_intersection<_AlgPolicy>( + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__result), + std::forward<_Compare>(__comp), + typename std::_IterOps<_AlgPolicy>::template __iterator_category<_InIter1>(), + typename std::_IterOps<_AlgPolicy>::template __iterator_category<_InIter2>()); +} + template <class _InputIterator1, class _InputIterator2, class _OutputIterator, class _Compare> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator set_intersection( _InputIterator1 __first1, diff --git a/lib/libcxx/include/__algorithm/simd_utils.h b/lib/libcxx/include/__algorithm/simd_utils.h new file mode 100644 index 0000000000..549197be80 --- /dev/null +++ b/lib/libcxx/include/__algorithm/simd_utils.h @@ -0,0 +1,164 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_SIMD_UTILS_H +#define _LIBCPP___ALGORITHM_SIMD_UTILS_H + +#include <__algorithm/min.h> +#include <__bit/bit_cast.h> +#include <__bit/countl.h> +#include <__bit/countr.h> +#include <__config> +#include <__type_traits/is_arithmetic.h> +#include <__type_traits/is_same.h> +#include <__utility/integer_sequence.h> +#include +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +// TODO: Find out how altivec changes things and allow vectorizations there too. +#if _LIBCPP_STD_VER >= 14 && defined(_LIBCPP_CLANG_VER) && !defined(__ALTIVEC__) +# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 1 +#else +# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 0 +#endif + +#if _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS && !defined(__OPTIMIZE_SIZE__) +# define _LIBCPP_VECTORIZE_ALGORITHMS 1 +#else +# define _LIBCPP_VECTORIZE_ALGORITHMS 0 +#endif + +#if _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS + +_LIBCPP_BEGIN_NAMESPACE_STD + +template +inline constexpr bool __can_map_to_integer_v = + sizeof(_Tp) == alignof(_Tp) && (sizeof(_Tp) == 1 || sizeof(_Tp) == 2 || sizeof(_Tp) == 4 || sizeof(_Tp) == 8); + +template +struct __get_as_integer_type_impl; + +template <> +struct __get_as_integer_type_impl<1> { + using type = uint8_t; +}; + +template <> +struct __get_as_integer_type_impl<2> { + using type = uint16_t; +}; +template <> +struct __get_as_integer_type_impl<4> { + using type = uint32_t; +}; +template <> +struct __get_as_integer_type_impl<8> { + using type = uint64_t; +}; + +template +using __get_as_integer_type_t = typename __get_as_integer_type_impl::type; + +// This isn't specialized for 64 byte vectors on purpose. They have the potential to significantly reduce performance +// in mixed simd/non-simd workloads and don't provide any performance improvement for currently vectorized algorithms +// as far as benchmarks are concerned. +# if defined(__AVX__) || defined(__MVS__) +template +inline constexpr size_t __native_vector_size = 32 / sizeof(_Tp); +# elif defined(__SSE__) || defined(__ARM_NEON__) +template +inline constexpr size_t __native_vector_size = 16 / sizeof(_Tp); +# elif defined(__MMX__) +template +inline constexpr size_t __native_vector_size = 8 / sizeof(_Tp); +# else +template +inline constexpr size_t __native_vector_size = 1; +# endif + +template +using __simd_vector __attribute__((__ext_vector_type__(_Np))) = _ArithmeticT; + +template +inline constexpr size_t __simd_vector_size_v = []() -> size_t { + static_assert(_False, "Not a vector!"); +}(); + +template +inline constexpr size_t __simd_vector_size_v<__simd_vector<_Tp, _Np>> = _Np; + +template +_LIBCPP_HIDE_FROM_ABI _Tp __simd_vector_underlying_type_impl(__simd_vector<_Tp, _Np>) { + return _Tp{}; +} + +template +using __simd_vector_underlying_type_t = decltype(std::__simd_vector_underlying_type_impl(_VecT{})); + +// This isn't inlined without always_inline when loading chars. 
+template <class _VecT, class _Iter> +_LIBCPP_NODISCARD _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI _VecT __load_vector(_Iter __iter) noexcept { + return [=]<size_t... _Indices>(index_sequence<_Indices...>) _LIBCPP_ALWAYS_INLINE noexcept { + return _VecT{__iter[_Indices]...}; + }(make_index_sequence<__simd_vector_size_v<_VecT>>{}); +} + +template <class _Tp, size_t _Np> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool __all_of(__simd_vector<_Tp, _Np> __vec) noexcept { + return __builtin_reduce_and(__builtin_convertvector(__vec, __simd_vector<bool, _Np>)); +} + +template <class _Tp, size_t _Np> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI size_t __find_first_set(__simd_vector<_Tp, _Np> __vec) noexcept { + using __mask_vec = __simd_vector<bool, _Np>; + + // This has MSan disabled due to https://github.com/llvm/llvm-project/issues/85876 + auto __impl = [&]<class _MaskT>(_MaskT) _LIBCPP_NO_SANITIZE("memory") noexcept { +# if defined(_LIBCPP_BIG_ENDIAN) + return std::min( + _Np, std::__countl_zero(__builtin_bit_cast(_MaskT, __builtin_convertvector(__vec, __mask_vec)))); +# else + return std::min( + _Np, std::__countr_zero(__builtin_bit_cast(_MaskT, __builtin_convertvector(__vec, __mask_vec)))); +# endif + }; + + if constexpr (sizeof(__mask_vec) == sizeof(uint8_t)) { + return __impl(uint8_t{}); + } else if constexpr (sizeof(__mask_vec) == sizeof(uint16_t)) { + return __impl(uint16_t{}); + } else if constexpr (sizeof(__mask_vec) == sizeof(uint32_t)) { + return __impl(uint32_t{}); + } else if constexpr (sizeof(__mask_vec) == sizeof(uint64_t)) { + return __impl(uint64_t{}); + } else { + static_assert(sizeof(__mask_vec) == 0, "unexpected required size for mask integer type"); + return 0; + } +} + +template <class _Tp, size_t _Np> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI size_t __find_first_not_set(__simd_vector<_Tp, _Np> __vec) noexcept { + return std::__find_first_set(~__vec); +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_SIMD_UTILS_H diff --git a/lib/libcxx/include/__algorithm/sort.h b/lib/libcxx/include/__algorithm/sort.h index 8a5e0211cd..07b5814639 100644 --- a/lib/libcxx/include/__algorithm/sort.h +++ b/lib/libcxx/include/__algorithm/sort.h @@ -696,9 +696,8 @@ __partition_with_equals_on_left(_RandomAccessIterator __first, _RandomAccessIter using _Ops = _IterOps<_AlgPolicy>; typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; typedef typename std::iterator_traits<_RandomAccessIterator>::value_type value_type; - // TODO(LLVM18): Make __begin const, see https://reviews.llvm.org/D147089#4349748 - _RandomAccessIterator __begin = __first; // used for bounds checking, those are not moved around - const _RandomAccessIterator __end = __last; + const _RandomAccessIterator __begin = __first; // used for bounds checking, those are not moved around + const _RandomAccessIterator __end = __last; (void)__end; // value_type __pivot(_Ops::__iter_move(__first)); if (__comp(__pivot, *(__last - difference_type(1)))) { diff --git a/lib/libcxx/include/__algorithm/sort_heap.h b/lib/libcxx/include/__algorithm/sort_heap.h index 060fc33c3c..f20b110c7f 100644 --- a/lib/libcxx/include/__algorithm/sort_heap.h +++ b/lib/libcxx/include/__algorithm/sort_heap.h @@ -16,8 +16,8 @@ #include <__config> #include <__debug_utils/strict_weak_ordering_check.h> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif diff --git 
a/lib/libcxx/include/__algorithm/stable_sort.h b/lib/libcxx/include/__algorithm/stable_sort.h index 9be192bd65..726e7e16b3 100644 --- a/lib/libcxx/include/__algorithm/stable_sort.h +++ b/lib/libcxx/include/__algorithm/stable_sort.h @@ -20,7 +20,7 @@ #include <__memory/destruct_n.h> #include <__memory/temporary_buffer.h> #include <__memory/unique_ptr.h> -#include <__type_traits/is_trivially_copy_assignable.h> +#include <__type_traits/is_trivially_assignable.h> #include <__utility/move.h> #include <__utility/pair.h> #include diff --git a/lib/libcxx/include/__algorithm/three_way_comp_ref_type.h b/lib/libcxx/include/__algorithm/three_way_comp_ref_type.h index 70c5818976..5702a1fee0 100644 --- a/lib/libcxx/include/__algorithm/three_way_comp_ref_type.h +++ b/lib/libcxx/include/__algorithm/three_way_comp_ref_type.h @@ -9,6 +9,7 @@ #ifndef _LIBCPP___ALGORITHM_THREE_WAY_COMP_REF_TYPE_H #define _LIBCPP___ALGORITHM_THREE_WAY_COMP_REF_TYPE_H +#include <__assert> #include <__compare/ordering.h> #include <__config> #include <__utility/declval.h> diff --git a/lib/libcxx/include/__algorithm/unique.h b/lib/libcxx/include/__algorithm/unique.h index 056373d06f..d597014596 100644 --- a/lib/libcxx/include/__algorithm/unique.h +++ b/lib/libcxx/include/__algorithm/unique.h @@ -29,7 +29,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // unique template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 std::pair<_Iter, _Iter> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 std::pair<_Iter, _Iter> __unique(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { __first = std::__adjacent_find(__first, __last, __pred); if (__first != __last) { @@ -46,13 +46,13 @@ __unique(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator unique(_ForwardIterator __first, _ForwardIterator __last, _BinaryPredicate __pred) { return std::__unique<_ClassicAlgPolicy>(std::move(__first), std::move(__last), __pred).first; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator unique(_ForwardIterator __first, _ForwardIterator __last) { return std::unique(__first, __last, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/unwrap_iter.h b/lib/libcxx/include/__algorithm/unwrap_iter.h index 50d815c970..8cc0d22d4f 100644 --- a/lib/libcxx/include/__algorithm/unwrap_iter.h +++ b/lib/libcxx/include/__algorithm/unwrap_iter.h @@ -13,7 +13,7 @@ #include <__iterator/iterator_traits.h> #include <__memory/pointer_traits.h> #include <__type_traits/enable_if.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/declval.h> #include <__utility/move.h> diff --git a/lib/libcxx/include/__algorithm/upper_bound.h b/lib/libcxx/include/__algorithm/upper_bound.h index f499f7a80a..c39dec2e89 100644 --- a/lib/libcxx/include/__algorithm/upper_bound.h +++ b/lib/libcxx/include/__algorithm/upper_bound.h @@ -18,7 +18,7 @@ #include <__iterator/advance.h> #include <__iterator/distance.h> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -48,7 +48,7 @@ 
__upper_bound(_Iter __first, _Sent __last, const _Tp& __value, _Compare&& __comp } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator upper_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { static_assert(is_copy_constructible<_ForwardIterator>::value, "Iterator has to be copy constructible"); return std::__upper_bound<_ClassicAlgPolicy>( @@ -56,7 +56,7 @@ upper_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __valu } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator upper_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::upper_bound(std::move(__first), std::move(__last), __value, __less<>()); } diff --git a/lib/libcxx/include/__assert b/lib/libcxx/include/__assert index eb862b5369..49769fb4d4 100644 --- a/lib/libcxx/include/__assert +++ b/lib/libcxx/include/__assert @@ -34,4 +34,85 @@ # define _LIBCPP_ASSUME(expression) ((void)0) #endif +// clang-format off +// Fast hardening mode checks. + +#if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST + +// Enabled checks. +# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) +// Disabled checks. +// On most modern platforms, dereferencing a null pointer does not lead to an actual memory access. +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) +// Overlapping ranges will make algorithms produce incorrect results but don't directly lead to a security +// vulnerability. +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) + +// Extensive hardening mode checks. + +#elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE + +// Enabled checks. 
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) +// Disabled checks. +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) + +// Debug hardening mode checks. + +#elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG + +// All checks enabled. +# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) + +// Disable all checks if hardening is not enabled. + +#else + +// All checks disabled. 
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) + +#endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST +// clang-format on + #endif // _LIBCPP___ASSERT diff --git a/lib/libcxx/include/__assertion_handler b/lib/libcxx/include/__assertion_handler index 8bc0553c07..3b6d6b2cca 100644 --- a/lib/libcxx/include/__assertion_handler +++ b/lib/libcxx/include/__assertion_handler @@ -23,8 +23,17 @@ #else -// TODO(hardening): use `__builtin_verbose_trap(message)` once that becomes available. -# define _LIBCPP_ASSERTION_HANDLER(message) ((void)message, __builtin_trap()) +# if __has_builtin(__builtin_verbose_trap) +// AppleClang shipped a slightly different version of __builtin_verbose_trap from the upstream +// version before upstream Clang actually got the builtin. 
+# if defined(_LIBCPP_APPLE_CLANG_VER) && _LIBCPP_APPLE_CLANG_VER < 17000 +# define _LIBCPP_ASSERTION_HANDLER(message) __builtin_verbose_trap(message) +# else +# define _LIBCPP_ASSERTION_HANDLER(message) __builtin_verbose_trap("libc++", message) +# endif +# else +# define _LIBCPP_ASSERTION_HANDLER(message) ((void)message, __builtin_trap()) +# endif #endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG diff --git a/lib/libcxx/include/__atomic/aliases.h b/lib/libcxx/include/__atomic/aliases.h index 0fa289de54..e27e09af6b 100644 --- a/lib/libcxx/include/__atomic/aliases.h +++ b/lib/libcxx/include/__atomic/aliases.h @@ -18,7 +18,6 @@ #include <__type_traits/make_unsigned.h> #include #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -92,7 +91,7 @@ using __largest_lock_free_type = short; # elif ATOMIC_CHAR_LOCK_FREE == 2 using __largest_lock_free_type = char; # else -# define _LIBCPP_NO_LOCK_FREE_TYPES // There are no lockfree types (this can happen in freestanding) +# define _LIBCPP_NO_LOCK_FREE_TYPES // There are no lockfree types (this can happen on unusual platforms) # endif # ifndef _LIBCPP_NO_LOCK_FREE_TYPES diff --git a/lib/libcxx/include/__atomic/atomic.h b/lib/libcxx/include/__atomic/atomic.h index 3dfb6937d0..bd3f659c22 100644 --- a/lib/libcxx/include/__atomic/atomic.h +++ b/lib/libcxx/include/__atomic/atomic.h @@ -462,22 +462,26 @@ atomic_wait_explicit(const atomic<_Tp>* __o, typename atomic<_Tp>::value_type __ // atomic_notify_one template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_one(volatile atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_one(volatile atomic<_Tp>* __o) _NOEXCEPT { __o->notify_one(); } template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_one(atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_one(atomic<_Tp>* __o) _NOEXCEPT { __o->notify_one(); } // atomic_notify_all template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_all(volatile atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_all(volatile atomic<_Tp>* __o) _NOEXCEPT { __o->notify_all(); } template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_all(atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_all(atomic<_Tp>* __o) _NOEXCEPT { __o->notify_all(); } diff --git a/lib/libcxx/include/__atomic/atomic_base.h b/lib/libcxx/include/__atomic/atomic_base.h index 3ad3b562c5..7e26434c9c 100644 --- a/lib/libcxx/include/__atomic/atomic_base.h +++ b/lib/libcxx/include/__atomic/atomic_base.h @@ -14,11 +14,10 @@ #include <__atomic/cxx_atomic_impl.h> #include <__atomic/is_always_lock_free.h> #include <__atomic/memory_order.h> -#include <__availability> #include <__config> #include <__memory/addressof.h> #include <__type_traits/is_integral.h> -#include <__type_traits/is_nothrow_default_constructible.h> +#include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_same.h> #include @@ -34,7 +33,7 @@ struct __atomic_base // false mutable __cxx_atomic_impl<_Tp> __a_; #if _LIBCPP_STD_VER >= 17 - static _LIBCPP_CONSTEXPR bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value; + static constexpr bool 
is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value; #endif _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const volatile _NOEXCEPT { @@ -104,24 +103,20 @@ struct __atomic_base // false _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT { - std::__cxx_atomic_wait(std::addressof(__a_), __v, __m); + std::__atomic_wait(*this, __v, __m); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT { - std::__cxx_atomic_wait(std::addressof(__a_), __v, __m); + std::__atomic_wait(*this, __v, __m); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT { - std::__cxx_atomic_notify_one(std::addressof(__a_)); - } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { - std::__cxx_atomic_notify_one(std::addressof(__a_)); + std::__atomic_notify_one(*this); } + _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT { - std::__cxx_atomic_notify_all(std::addressof(__a_)); - } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { - std::__cxx_atomic_notify_all(std::addressof(__a_)); + std::__atomic_notify_all(*this); } + _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); } #if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {} @@ -134,11 +129,6 @@ struct __atomic_base // false __atomic_base(const __atomic_base&) = delete; }; -#if _LIBCPP_STD_VER >= 17 -template -_LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free; -#endif - // atomic template @@ -200,6 +190,32 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> { _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; } }; +// Here we need _IsIntegral because the default template argument is not enough +// e.g __atomic_base is __atomic_base, which inherits from +// __atomic_base and the caller of the wait function is +// __atomic_base. 
So specializing __atomic_base<_Tp> does not work +template +struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > { + static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) { + return __a.load(__order); + } + + static _LIBCPP_HIDE_FROM_ABI _Tp + __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) { + return __this.load(__order); + } + + static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>* + __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) { + return std::addressof(__a.__a_); + } + + static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>* + __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) { + return std::addressof(__this.__a_); + } +}; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___ATOMIC_ATOMIC_BASE_H diff --git a/lib/libcxx/include/__atomic/atomic_flag.h b/lib/libcxx/include/__atomic/atomic_flag.h index d76e5e45c0..00b157cdff 100644 --- a/lib/libcxx/include/__atomic/atomic_flag.h +++ b/lib/libcxx/include/__atomic/atomic_flag.h @@ -15,7 +15,8 @@ #include <__atomic/memory_order.h> #include <__chrono/duration.h> #include <__config> -#include <__threading_support> +#include <__memory/addressof.h> +#include <__thread/support.h> #include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -47,22 +48,26 @@ struct atomic_flag { __cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(bool __v, memory_order __m = memory_order_seq_cst) const - volatile _NOEXCEPT { - __cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void + wait(bool __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT { + std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(bool __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT { - __cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); + std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT { - __cxx_atomic_notify_one(&__a_); + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT { + std::__atomic_notify_one(*this); + } + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { + std::__atomic_notify_one(*this); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { __cxx_atomic_notify_one(&__a_); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT { - __cxx_atomic_notify_all(&__a_); + std::__atomic_notify_all(*this); + } + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { + std::__atomic_notify_all(*this); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { __cxx_atomic_notify_all(&__a_); } #if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI constexpr atomic_flag() _NOEXCEPT : __a_(false) {} @@ -77,6 +82,28 @@ struct atomic_flag { atomic_flag& operator=(const atomic_flag&) volatile = delete; }; +template <> +struct __atomic_waitable_traits { + static _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_ATOMIC_FLAG_TYPE __atomic_load(const atomic_flag& __a, memory_order __order) { + return std::__cxx_atomic_load(&__a.__a_, __order); + } + + static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE + __atomic_load(const volatile atomic_flag& __a, memory_order __order) { + return std::__cxx_atomic_load(&__a.__a_, __order); + } + + static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>* + __atomic_contention_address(const atomic_flag& __a) { + return std::addressof(__a.__a_); + } + + static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>* + __atomic_contention_address(const volatile atomic_flag& __a) { + return std::addressof(__a.__a_); + } +}; + inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const volatile atomic_flag* __o) _NOEXCEPT { return __o->test(); } inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const atomic_flag* __o) _NOEXCEPT { return __o->test(); } @@ -117,41 +144,43 @@ inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear_explicit(atomic_flag* __o, m __o->clear(__m); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait(const volatile atomic_flag* __o, bool __v) _NOEXCEPT { __o->wait(__v); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait(const atomic_flag* __o, bool __v) _NOEXCEPT { __o->wait(__v); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait_explicit(const volatile atomic_flag* __o, bool __v, memory_order __m) _NOEXCEPT { __o->wait(__v, __m); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait_explicit(const atomic_flag* __o, bool __v, memory_order __m) _NOEXCEPT { __o->wait(__v, __m); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_one(volatile atomic_flag* __o) _NOEXCEPT { __o->notify_one(); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_one(atomic_flag* __o) _NOEXCEPT { +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +atomic_flag_notify_one(atomic_flag* __o) _NOEXCEPT { __o->notify_one(); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_all(volatile atomic_flag* __o) _NOEXCEPT { __o->notify_all(); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_all(atomic_flag* __o) _NOEXCEPT { +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +atomic_flag_notify_all(atomic_flag* __o) _NOEXCEPT { __o->notify_all(); } diff --git a/lib/libcxx/include/__atomic/atomic_init.h b/lib/libcxx/include/__atomic/atomic_init.h index 8ef5958bfe..8e86ba31b4 100644 --- a/lib/libcxx/include/__atomic/atomic_init.h +++ b/lib/libcxx/include/__atomic/atomic_init.h @@ -15,12 +15,10 @@ # pragma GCC system_header #endif -#define ATOMIC_FLAG_INIT \ - { false } -#define ATOMIC_VAR_INIT(__v) \ - { __v } +#define ATOMIC_FLAG_INIT {false} +#define ATOMIC_VAR_INIT(__v) {__v} 
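// The __atomic_waitable_traits specializations above (for atomic_flag here and
// for __atomic_base earlier) follow one customization-point pattern, reduced
// below to a standalone model with hypothetical names: the generic wait code
// is written purely against a traits struct, so any type opts in by
// specializing the traits rather than by inheriting from a common base.
#include <atomic>

template <class T>
struct waitable_traits; // primary template: not waitable unless specialized

template <class T>
struct waitable_traits<std::atomic<T>> { // opt-in for std::atomic<T>
  static T load(const std::atomic<T>& a) { return a.load(std::memory_order_acquire); }
  static const void* address(const std::atomic<T>& a) { return &a; } // stable address to block on
};

template <class W, class Pred>
void wait_until(const W& w, Pred pred) { // the algorithm sees only the traits
  while (!pred(waitable_traits<W>::load(w))) {
    // a real implementation yields and eventually blocks instead of spinning
  }
}
// e.g. wait_until(some_atomic_int, [](int v) { return v != 0; });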
-#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) +#if _LIBCPP_STD_VER >= 20 && defined(_LIBCPP_COMPILER_CLANG_BASED) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) # pragma clang deprecated(ATOMIC_VAR_INIT) #endif diff --git a/lib/libcxx/include/__atomic/atomic_ref.h b/lib/libcxx/include/__atomic/atomic_ref.h new file mode 100644 index 0000000000..b0180a37ab --- /dev/null +++ b/lib/libcxx/include/__atomic/atomic_ref.h @@ -0,0 +1,378 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H +#define _LIBCPP___ATOMIC_ATOMIC_REF_H + +#include <__assert> +#include <__atomic/atomic_sync.h> +#include <__atomic/check_memory_order.h> +#include <__atomic/to_gcc_order.h> +#include <__concepts/arithmetic.h> +#include <__concepts/same_as.h> +#include <__config> +#include <__memory/addressof.h> +#include <__type_traits/has_unique_object_representation.h> +#include <__type_traits/is_trivially_copyable.h> +#include +#include +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if _LIBCPP_STD_VER >= 20 + +// These types are required to make __atomic_is_always_lock_free work across GCC and Clang. +// The purpose of this trick is to make sure that we provide an object with the correct alignment +// to __atomic_is_always_lock_free, since that answer depends on the alignment. +template +struct __alignment_checker_type { + alignas(_Alignment) char __data; +}; + +template +struct __get_aligner_instance { + static constexpr __alignment_checker_type<_Alignment> __instance{}; +}; + +template +struct __atomic_ref_base { +private: + _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept { + _Tp* __ptr = std::addressof(__val); +# if __has_builtin(__builtin_clear_padding) + __builtin_clear_padding(__ptr); +# endif + return __ptr; + } + + _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange( + _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept { + if constexpr ( +# if __has_builtin(__builtin_clear_padding) + has_unique_object_representations_v<_Tp> || floating_point<_Tp> +# else + true // NOLINT(readability-simplify-boolean-expr) +# endif + ) { + return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure); + } else { // _Tp has padding bits and __builtin_clear_padding is available + __clear_padding(*__desired); + _Tp __copy = *__expected; + __clear_padding(__copy); + // The algorithm we use here is basically to perform `__atomic_compare_exchange` on the + // values until it has either succeeded, or failed because the value representation of the + // objects involved was different. 
This is why we loop around __atomic_compare_exchange: + // we basically loop until its failure is caused by the value representation of the objects + // being different, not only their object representation. + while (true) { + _Tp __prev = __copy; + if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) { + return true; + } + _Tp __curr = __copy; + if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) { + // Value representation without padding bits do not compare equal -> + // write the current content of *ptr into *expected + std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp)); + return false; + } + } + } + } + + friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>; + + // require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to at least their size to be potentially + // used lock-free + static constexpr size_t __min_alignment = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || (sizeof(_Tp) > 16) ? 0 : sizeof(_Tp); + +public: + using value_type = _Tp; + + static constexpr size_t required_alignment = alignof(_Tp) > __min_alignment ? alignof(_Tp) : __min_alignment; + + // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided, + // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed + // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition + // of atomic_ref's constructor. + static constexpr bool is_always_lock_free = + __atomic_always_lock_free(sizeof(_Tp), &__get_aligner_instance::__instance); + + _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); } + + _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst, + "atomic_ref: memory order argument to atomic store operation is invalid"); + __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { + store(__desired); + return __desired; + } + + _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire || + __order == memory_order::seq_cst, + "atomic_ref: memory order argument to atomic load operation is invalid"); + alignas(_Tp) byte __mem[sizeof(_Tp)]; + auto* __ret = reinterpret_cast<_Tp*>(__mem); + __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order)); + return *__ret; + } + + _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); } + + _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + alignas(_Tp) byte __mem[sizeof(_Tp)]; + auto* __ret = reinterpret_cast<_Tp*>(__mem); + __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order)); + return *__ret; + } + + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { + 
_LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __failure == memory_order::relaxed || __failure == memory_order::consume || + __failure == memory_order::acquire || __failure == memory_order::seq_cst, + "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid"); + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + true, + std::__to_gcc_order(__success), + std::__to_gcc_order(__failure)); + } + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __failure == memory_order::relaxed || __failure == memory_order::consume || + __failure == memory_order::acquire || __failure == memory_order::seq_cst, + "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid"); + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + false, + std::__to_gcc_order(__success), + std::__to_gcc_order(__failure)); + } + + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + true, + std::__to_gcc_order(__order), + std::__to_gcc_failure_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + false, + std::__to_gcc_order(__order), + std::__to_gcc_failure_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire || + __order == memory_order::seq_cst, + "atomic_ref: memory order argument to atomic wait operation is invalid"); + std::__atomic_wait(*this, __old, __order); + } + _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); } + _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); } + +protected: + typedef _Tp _Aligned_Tp __attribute__((aligned(required_alignment))); + _Aligned_Tp* __ptr_; + + _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {} +}; + +template +struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> { + static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) { + return __a.load(__order); + } + static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) { + return __a.__ptr_; + } +}; + +template +struct atomic_ref : public __atomic_ref_base<_Tp> { + static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref requires that 'T' be a trivially copyable type"); + + using __base = __atomic_ref_base<_Tp>; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + reinterpret_cast(std::addressof(__obj)) % __base::required_alignment == 0, + "atomic_ref ctor: referenced object must be aligned to required_alignment"); + } + + 
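// What the class above gives user code (std::atomic_ref is standard C++20):
// atomic access to an object that otherwise remains a plain non-atomic
// variable. The alignas below satisfies exactly the precondition asserted in
// the constructor shown above.
#include <atomic>

alignas(std::atomic_ref<int>::required_alignment) int counter = 0;

void bump() {
  std::atomic_ref<int> ref(counter); // no ownership; counter stays a plain int
  ref.fetch_add(1, std::memory_order_relaxed);
}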
_LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; +}; + +template + requires(std::integral<_Tp> && !std::same_as) +struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { + using __base = __atomic_ref_base<_Tp>; + + using difference_type = __base::value_type; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + reinterpret_cast(std::addressof(__obj)) % __base::required_alignment == 0, + "atomic_ref ctor: referenced object must be aligned to required_alignment"); + } + + _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; + + _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); } + _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); } + _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); } + _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); } + _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; } +}; + +template + requires std::floating_point<_Tp> +struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { + using __base = __atomic_ref_base<_Tp>; + + using difference_type = __base::value_type; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + reinterpret_cast(std::addressof(__obj)) % __base::required_alignment == 0, + "atomic_ref ctor: referenced object must be aligned to required_alignment"); + } + + _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; + + 
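// The floating-point fetch_add/fetch_sub just below cannot use the
// __atomic_fetch_* builtins (those accept only integer and pointer types), so
// they run a compare-exchange loop. The same idiom, standalone:
#include <atomic>

inline float fetch_add_relaxed(std::atomic<float>& a, float arg) {
  float old = a.load(std::memory_order_relaxed);
  // On failure, compare_exchange_weak reloads `old` with the freshly observed
  // value, so the desired sum is recomputed on every retry.
  while (!a.compare_exchange_weak(old, old + arg, std::memory_order_relaxed)) {
  }
  return old; // fetch-style operations return the previous value
}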
_LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _Tp __old = this->load(memory_order_relaxed); + _Tp __new = __old + __arg; + while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { + __new = __old + __arg; + } + return __old; + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _Tp __old = this->load(memory_order_relaxed); + _Tp __new = __old - __arg; + while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { + __new = __old - __arg; + } + return __old; + } + + _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; } +}; + +template +struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> { + using __base = __atomic_ref_base<_Tp*>; + + using difference_type = ptrdiff_t; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {} + + _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; + + _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); } + _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); } + _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; } + _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; } + _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; } +}; + +_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref); + +#endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP__ATOMIC_ATOMIC_REF_H diff --git a/lib/libcxx/include/__atomic/atomic_sync.h b/lib/libcxx/include/__atomic/atomic_sync.h index 3d20d6a8ce..aaf81f5873 100644 --- a/lib/libcxx/include/__atomic/atomic_sync.h +++ b/lib/libcxx/include/__atomic/atomic_sync.h @@ -12,13 +12,17 @@ #include <__atomic/contention_t.h> #include <__atomic/cxx_atomic_impl.h> #include <__atomic/memory_order.h> -#include <__availability> +#include <__atomic/to_gcc_order.h> #include <__chrono/duration.h> #include <__config> #include <__memory/addressof.h> #include <__thread/poll_with_backoff.h> -#include <__threading_support> +#include <__thread/support.h> +#include <__type_traits/conjunction.h> #include <__type_traits/decay.h> +#include <__type_traits/invoke.h> +#include <__type_traits/void_t.h> +#include <__utility/declval.h> #include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -27,33 +31,101 @@ _LIBCPP_BEGIN_NAMESPACE_STD +// The customisation points to enable the following functions: +// - __atomic_wait +// - __atomic_wait_unless +// - __atomic_notify_one +// - __atomic_notify_all +// Note that std::atomic::wait was back-ported to C++03 +// The below 
implementations look ugly to support C++03 +template +struct __atomic_waitable_traits { + template + static void __atomic_load(_AtomicWaitable&&, memory_order) = delete; + + template + static void __atomic_contention_address(_AtomicWaitable&&) = delete; +}; + +template +struct __atomic_waitable : false_type {}; + +template +struct __atomic_waitable< _Tp, + __void_t >::__atomic_load( + std::declval(), std::declval())), + decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_contention_address( + std::declval()))> > : true_type {}; + +template +struct __atomic_wait_poll_impl { + const _AtomicWaitable& __a_; + _Poll __poll_; + memory_order __order_; + + _LIBCPP_HIDE_FROM_ABI bool operator()() const { + auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a_, __order_); + return __poll_(__current_val); + } +}; + #ifndef _LIBCPP_HAS_NO_THREADS -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*); -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*); -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor(void const volatile*); -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __libcpp_atomic_wait(void const volatile*, __cxx_contention_t); - -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void -__cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile*); -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void -__cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile*); +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*) _NOEXCEPT; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t -__libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*); +__libcpp_atomic_monitor(void const volatile*) _NOEXCEPT; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void -__libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t); +__libcpp_atomic_wait(void const volatile*, __cxx_contention_t) _NOEXCEPT; + +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void +__cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void +__cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t +__libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void +__libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t) _NOEXCEPT; + +template +struct __atomic_wait_backoff_impl { + const _AtomicWaitable& __a_; + _Poll __poll_; + memory_order __order_; + + using __waitable_traits = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >; + + _LIBCPP_AVAILABILITY_SYNC + _LIBCPP_HIDE_FROM_ABI bool + __update_monitor_val_and_poll(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor_val) const { + // In case the contention type happens to be __cxx_atomic_contention_t, i.e. __cxx_atomic_impl, + // the platform wait is directly monitoring the atomic value itself. + // `__poll_` takes the current value of the atomic as an in-out argument + // to potentially modify it. 
After it returns, `__monitor` has a value + // which can be safely waited on by `std::__libcpp_atomic_wait` without any + // ABA style issues. + __monitor_val = __waitable_traits::__atomic_load(__a_, __order_); + return __poll_(__monitor_val); + } + + _LIBCPP_AVAILABILITY_SYNC + _LIBCPP_HIDE_FROM_ABI bool + __update_monitor_val_and_poll(void const volatile* __contention_address, __cxx_contention_t& __monitor_val) const { + // In case the contention type is anything else, platform wait is monitoring a __cxx_atomic_contention_t + // from the global pool, the monitor comes from __libcpp_atomic_monitor + __monitor_val = std::__libcpp_atomic_monitor(__contention_address); + auto __current_val = __waitable_traits::__atomic_load(__a_, __order_); + return __poll_(__current_val); + } -template -struct __libcpp_atomic_wait_backoff_impl { - _Atp* __a; - _Fn __test_fn; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool operator()(chrono::nanoseconds __elapsed) const { if (__elapsed > chrono::microseconds(64)) { - auto const __monitor = std::__libcpp_atomic_monitor(__a); - if (__test_fn()) + auto __contention_address = __waitable_traits::__atomic_contention_address(__a_); + __cxx_contention_t __monitor_val; + if (__update_monitor_val_and_poll(__contention_address, __monitor_val)) return true; - std::__libcpp_atomic_wait(__a, __monitor); + std::__libcpp_atomic_wait(__contention_address, __monitor_val); } else if (__elapsed > chrono::microseconds(4)) __libcpp_thread_yield(); else { @@ -62,23 +134,49 @@ struct __libcpp_atomic_wait_backoff_impl { } }; -template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_wait(_Atp* __a, _Fn&& __test_fn) { - __libcpp_atomic_wait_backoff_impl<_Atp, __decay_t<_Fn> > __backoff_fn = {__a, __test_fn}; - return std::__libcpp_thread_poll_with_backoff(__test_fn, __backoff_fn); +// The semantics of this function are similar to `atomic`'s +// `.wait(T old, std::memory_order order)`, but instead of having a hardcoded +// predicate (is the loaded value unequal to `old`?), the predicate function is +// specified as an argument. The loaded value is given as an in-out argument to +// the predicate. If the predicate function returns `true`, +// `__atomic_wait_unless` will return. If the predicate function returns +// `false`, it must set the argument to its current understanding of the atomic +// value. The predicate function must not return `false` spuriously. 
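// The operator() above escalates through three phases - spin (up to ~4us),
// yield (up to ~64us), then block on the platform wait using the monitor
// value - and __atomic_wait_unless below plugs a poll function into that
// loop. A reduced model, with a sleep standing in for the real blocking wait:
#include <chrono>
#include <thread>

template <class Poll>
void poll_with_backoff(Poll poll) {
  auto const start = std::chrono::steady_clock::now();
  while (!poll()) {
    auto const elapsed = std::chrono::steady_clock::now() - start;
    if (elapsed > std::chrono::microseconds(64))
      std::this_thread::sleep_for(std::chrono::microseconds(64)); // stand-in for the futex-style wait
    else if (elapsed > std::chrono::microseconds(4))
      std::this_thread::yield();
    // else: keep spinning
  }
}
// To user code this machinery surfaces as std::atomic<T>::wait()/notify_one() (C++20).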
+template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +__atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_impl = {__a, __poll, __order}; + __atomic_wait_backoff_impl<_AtomicWaitable, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order}; + std::__libcpp_thread_poll_with_backoff(__poll_impl, __backoff_fn); +} + +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable& __a) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + std::__cxx_atomic_notify_one(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a)); +} + +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable& __a) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + std::__cxx_atomic_notify_all(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a)); } #else // _LIBCPP_HAS_NO_THREADS -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp> const volatile*) {} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp> const volatile*) {} -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_wait(_Atp*, _Fn&& __test_fn) { - return std::__libcpp_thread_poll_with_backoff(__test_fn, __spinning_backoff_policy()); +template +_LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) { + __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order}; + std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy()); } +template +_LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable&) {} + +template +_LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable&) {} + #endif // _LIBCPP_HAS_NO_THREADS template @@ -86,21 +184,20 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp c return std::memcmp(std::addressof(__lhs), std::addressof(__rhs), sizeof(_Tp)) == 0; } -template -struct __cxx_atomic_wait_test_fn_impl { - _Atp* __a; - _Tp __val; - memory_order __order; - _LIBCPP_HIDE_FROM_ABI bool operator()() const { - return !std::__cxx_nonatomic_compare_equal(std::__cxx_atomic_load(__a, __order), __val); +template +struct __atomic_compare_unequal_to { + _Tp __val_; + _LIBCPP_HIDE_FROM_ABI bool operator()(const _Tp& __arg) const { + return !std::__cxx_nonatomic_compare_equal(__arg, __val_); } }; -template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool -__cxx_atomic_wait(_Atp* __a, _Tp const __val, memory_order __order) { - __cxx_atomic_wait_test_fn_impl<_Atp, _Tp> __test_fn = {__a, __val, __order}; - return std::__cxx_atomic_wait(__a, __test_fn); +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +__atomic_wait(_AtomicWaitable& __a, _Up __val, memory_order __order) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + __atomic_compare_unequal_to<_Up> __nonatomic_equal = {__val}; + std::__atomic_wait_unless(__a, __nonatomic_equal, __order); } _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__atomic/check_memory_order.h b/lib/libcxx/include/__atomic/check_memory_order.h index 3012aec052..536f764a61 100644 --- a/lib/libcxx/include/__atomic/check_memory_order.h +++ b/lib/libcxx/include/__atomic/check_memory_order.h @@ -27,4 +27,8 
@@ _LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel, \ "memory order argument to atomic operation is invalid") +#define _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__m) \ + _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel, \ + "memory order argument to atomic operation is invalid") + #endif // _LIBCPP___ATOMIC_CHECK_MEMORY_ORDER_H diff --git a/lib/libcxx/include/__atomic/cxx_atomic_impl.h b/lib/libcxx/include/__atomic/cxx_atomic_impl.h index 1a0b808a0c..18e88aa97b 100644 --- a/lib/libcxx/include/__atomic/cxx_atomic_impl.h +++ b/lib/libcxx/include/__atomic/cxx_atomic_impl.h @@ -9,16 +9,14 @@ #ifndef _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H #define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H -#include <__atomic/is_always_lock_free.h> #include <__atomic/memory_order.h> +#include <__atomic/to_gcc_order.h> #include <__config> #include <__memory/addressof.h> -#include <__type_traits/conditional.h> #include <__type_traits/is_assignable.h> #include <__type_traits/is_trivially_copyable.h> #include <__type_traits/remove_const.h> #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -26,7 +24,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD -#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS) +#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because // the default operator= in an object is not volatile, a byte-by-byte copy @@ -44,10 +42,6 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, *__to++ = *__from++; } -#endif - -#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) - template struct __cxx_atomic_base_impl { _LIBCPP_HIDE_FROM_ABI @@ -61,32 +55,6 @@ struct __cxx_atomic_base_impl { _Tp __a_value; }; -_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) { - // Avoid switch statement to make this a constexpr. - return __order == memory_order_relaxed - ? __ATOMIC_RELAXED - : (__order == memory_order_acquire - ? __ATOMIC_ACQUIRE - : (__order == memory_order_release - ? __ATOMIC_RELEASE - : (__order == memory_order_seq_cst - ? __ATOMIC_SEQ_CST - : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME)))); -} - -_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) { - // Avoid switch statement to make this a constexpr. - return __order == memory_order_relaxed - ? __ATOMIC_RELAXED - : (__order == memory_order_acquire - ? __ATOMIC_ACQUIRE - : (__order == memory_order_release - ? __ATOMIC_RELAXED - : (__order == memory_order_seq_cst - ? __ATOMIC_SEQ_CST - : (__order == memory_order_acq_rel ? 
__ATOMIC_ACQUIRE : __ATOMIC_CONSUME)))); -} - template _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) { __cxx_atomic_assign_volatile(__a->__a_value, __val); @@ -529,289 +497,7 @@ __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_o #endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP -#ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS - -template -struct __cxx_atomic_lock_impl { - _LIBCPP_HIDE_FROM_ABI __cxx_atomic_lock_impl() _NOEXCEPT : __a_value(), __a_lock(0) {} - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT - : __a_value(value), - __a_lock(0) {} - - _Tp __a_value; - mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock; - - _LIBCPP_HIDE_FROM_ABI void __lock() const volatile { - while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) - /*spin*/; - } - _LIBCPP_HIDE_FROM_ABI void __lock() const { - while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) - /*spin*/; - } - _LIBCPP_HIDE_FROM_ABI void __unlock() const volatile { - __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release); - } - _LIBCPP_HIDE_FROM_ABI void __unlock() const { - __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release); - } - _LIBCPP_HIDE_FROM_ABI _Tp __read() const volatile { - __lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a_value); - __unlock(); - return __old; - } - _LIBCPP_HIDE_FROM_ABI _Tp __read() const { - __lock(); - _Tp __old = __a_value; - __unlock(); - return __old; - } - _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const volatile { - __lock(); - __cxx_atomic_assign_volatile(*__dst, __a_value); - __unlock(); - } - _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const { - __lock(); - *__dst = __a_value; - __unlock(); - } -}; - -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) { - __cxx_atomic_assign_volatile(__a->__a_value, __val); -} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) { - __a->__a_value = __val; -} - -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) { - __a->__lock(); - __cxx_atomic_assign_volatile(__a->__a_value, __val); - __a->__unlock(); -} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) { - __a->__lock(); - __a->__a_value = __val; - __a->__unlock(); -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) { - return __a->__read(); -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) { - return __a->__read(); -} - -template -_LIBCPP_HIDE_FROM_ABI void -__cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) { - __a->__read_inplace(__dst); -} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) { - __a->__read_inplace(__dst); -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, __value); - 
__a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value = __value; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong( - volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - _Tp __temp; - __a->__lock(); - __cxx_atomic_assign_volatile(__temp, __a->__a_value); - bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0); - if (__ret) - __cxx_atomic_assign_volatile(__a->__a_value, __value); - else - __cxx_atomic_assign_volatile(*__expected, __a->__a_value); - __a->__unlock(); - return __ret; -} -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong( - __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - __a->__lock(); - bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0); - if (__ret) - std::memcpy(&__a->__a_value, &__value, sizeof(_Tp)); - else - std::memcpy(__expected, &__a->__a_value, sizeof(_Tp)); - __a->__unlock(); - return __ret; -} - -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak( - volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - _Tp __temp; - __a->__lock(); - __cxx_atomic_assign_volatile(__temp, __a->__a_value); - bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0); - if (__ret) - __cxx_atomic_assign_volatile(__a->__a_value, __value); - else - __cxx_atomic_assign_volatile(*__expected, __a->__a_value); - __a->__unlock(); - return __ret; -} -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak( - __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - __a->__lock(); - bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0); - if (__ret) - std::memcpy(&__a->__a_value, &__value, sizeof(_Tp)); - else - std::memcpy(__expected, &__a->__a_value, sizeof(_Tp)); - __a->__unlock(); - return __ret; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value += __delta; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp* -__cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) { - __a->__lock(); - _Tp* __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) { - __a->__lock(); - _Tp* __old = __a->__a_value; - __a->__a_value += __delta; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - 
__cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value -= __delta; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp -__cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value &= __pattern; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp -__cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value |= __pattern; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp -__cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value ^= __pattern; - __a->__unlock(); - return __old; -} - -template ::__value, - __cxx_atomic_base_impl<_Tp>, - __cxx_atomic_lock_impl<_Tp> >::type> -#else template > -#endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS struct __cxx_atomic_impl : public _Base { static_assert(is_trivially_copyable<_Tp>::value, "std::atomic requires that 'T' be a trivially copyable type"); diff --git a/lib/libcxx/include/__atomic/memory_order.h b/lib/libcxx/include/__atomic/memory_order.h index 16fd186769..294121d1c4 100644 --- a/lib/libcxx/include/__atomic/memory_order.h +++ b/lib/libcxx/include/__atomic/memory_order.h @@ -37,7 +37,7 @@ enum class memory_order : __memory_order_underlying_t { seq_cst = __mo_seq_cst }; -static_assert((is_same::type, __memory_order_underlying_t>::value), +static_assert(is_same::type, __memory_order_underlying_t>::value, "unexpected underlying type for std::memory_order"); inline constexpr auto memory_order_relaxed = memory_order::relaxed; diff --git a/lib/libcxx/include/__atomic/to_gcc_order.h b/lib/libcxx/include/__atomic/to_gcc_order.h new file mode 100644 index 0000000000..d04c111add --- /dev/null +++ b/lib/libcxx/include/__atomic/to_gcc_order.h @@ -0,0 +1,54 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ATOMIC_TO_GCC_ORDER_H +#define _LIBCPP___ATOMIC_TO_GCC_ORDER_H + +#include <__atomic/memory_order.h> +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) && \ + defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST) + +_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) { + // Avoid switch statement to make this a constexpr. + return __order == memory_order_relaxed + ? __ATOMIC_RELAXED + : (__order == memory_order_acquire + ? __ATOMIC_ACQUIRE + : (__order == memory_order_release + ? __ATOMIC_RELEASE + : (__order == memory_order_seq_cst + ? __ATOMIC_SEQ_CST + : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME)))); +} + +_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) { + // Avoid switch statement to make this a constexpr. + return __order == memory_order_relaxed + ? __ATOMIC_RELAXED + : (__order == memory_order_acquire + ? __ATOMIC_ACQUIRE + : (__order == memory_order_release + ? __ATOMIC_RELAXED + : (__order == memory_order_seq_cst + ? __ATOMIC_SEQ_CST + : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME)))); +} + +#endif + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___ATOMIC_TO_GCC_ORDER_H diff --git a/lib/libcxx/include/__availability b/lib/libcxx/include/__availability deleted file mode 100644 index b8b2da9bb1..0000000000 --- a/lib/libcxx/include/__availability +++ /dev/null @@ -1,324 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___AVAILABILITY -#define _LIBCPP___AVAILABILITY - -#include <__config> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -// Libc++ is shipped by various vendors. In particular, it is used as a system -// library on macOS, iOS and other Apple platforms. In order for users to be -// able to compile a binary that is intended to be deployed to an older version -// of a platform, Clang provides availability attributes [1]. These attributes -// can be placed on declarations and are used to describe the life cycle of a -// symbol in the library. -// -// The main goal is to ensure a compile-time error if a symbol that hasn't been -// introduced in a previously released library is used in a program that targets -// that previously released library. Normally, this would be a load-time error -// when one tries to launch the program against the older library. -// -// For example, the filesystem library was introduced in the dylib in macOS 10.15. -// If a user compiles on a macOS 10.15 host but targets macOS 10.13 with their -// program, the compiler would normally not complain (because the required -// declarations are in the headers), but the dynamic loader would fail to find -// the symbols when actually trying to launch the program on macOS 10.13. 
To -// turn this into a compile-time issue instead, declarations are annotated with -// when they were introduced, and the compiler can produce a diagnostic if the -// program references something that isn't available on the deployment target. -// -// This mechanism is general in nature, and any vendor can add their markup to -// the library (see below). Whenever a new feature is added that requires support -// in the shared library, two macros are added below to allow marking the feature -// as unavailable: -// 1. A macro named `_LIBCPP_AVAILABILITY_HAS_NO_` which must be defined -// exactly when compiling for a target that doesn't support the feature. -// 2. A macro named `_LIBCPP_AVAILABILITY_`, which must always be defined -// and must expand to the proper availability attribute for the platform. -// -// When vendors decide to ship the feature as part of their shared library, they -// can update these macros appropriately for their platform, and the library will -// use those to provide an optimal user experience. -// -// Furthermore, many features in the standard library have corresponding -// feature-test macros. The `_LIBCPP_AVAILABILITY_HAS_NO_` macros -// are checked by the corresponding feature-test macros generated by -// generate_feature_test_macro_components.py to ensure that the library -// doesn't announce a feature as being implemented if it is unavailable on -// the deployment target. -// -// Note that this mechanism is disabled by default in the "upstream" libc++. -// Availability annotations are only meaningful when shipping libc++ inside -// a platform (i.e. as a system library), and so vendors that want them should -// turn those annotations on at CMake configuration time. -// -// [1]: https://clang.llvm.org/docs/AttributeReference.html#availability - -// For backwards compatibility, allow users to define _LIBCPP_DISABLE_AVAILABILITY -// for a while. -#if defined(_LIBCPP_DISABLE_AVAILABILITY) -# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -# endif -#endif - -// Availability markup is disabled when building the library, or when a non-Clang -// compiler is used because only Clang supports the necessary attributes. -// doesn't support the proper attributes. -#if defined(_LIBCPP_BUILDING_LIBRARY) || defined(_LIBCXXABI_BUILDING_LIBRARY) || !defined(_LIBCPP_COMPILER_CLANG_BASED) -# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -# endif -#endif - -#if defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) - -// These macros control the availability of std::bad_optional_access and -// other exception types. These were put in the shared library to prevent -// code bloat from every user program defining the vtable for these exception -// types. -// -// Note that when exceptions are disabled, the methods that normally throw -// these exceptions can be used even on older deployment targets, but those -// methods will abort instead of throwing. 
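// A reduced model (hypothetical names) of the two-macro convention described
// above: a 0/1 flag that feature-test machinery can consult, paired with an
// attribute macro applied to declarations. The definitions that follow
// instantiate this pattern once per dylib-backed feature.
#if defined(MYLIB_NO_VENDOR_ANNOTATIONS)
#  define MYLIB_HAS_FROBNICATE 1
#  define MYLIB_AVAILABILITY_FROBNICATE
#else
#  define MYLIB_HAS_FROBNICATE 0
#  define MYLIB_AVAILABILITY_FROBNICATE __attribute__((unavailable)) // Clang/GCC attribute
#endif

MYLIB_AVAILABILITY_FROBNICATE void frobnicate();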
-# define _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS 1 -# define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS - -# define _LIBCPP_AVAILABILITY_HAS_BAD_VARIANT_ACCESS 1 -# define _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS - -# define _LIBCPP_AVAILABILITY_HAS_BAD_ANY_CAST 1 -# define _LIBCPP_AVAILABILITY_BAD_ANY_CAST - -// These macros control the availability of __cxa_init_primary_exception -// in the built library, which std::make_exception_ptr might use -// (see libcxx/include/__exception/exception_ptr.h). -# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 1 -# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION - -// These macros control the availability of all parts of <filesystem> that -// depend on something in the dylib. -# define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY 1 -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_PUSH -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_POP - -// This controls the availability of floating-point std::to_chars functions. -// These overloads were added later than the integer overloads. -# define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT 1 -# define _LIBCPP_AVAILABILITY_TO_CHARS_FLOATING_POINT - -// This controls the availability of the C++20 synchronization library, -// which requires shared library support for various operations -// (see libcxx/src/atomic.cpp). This includes <barrier>, <latch>, -// <semaphore>, and notification functions on std::atomic. -# define _LIBCPP_AVAILABILITY_HAS_SYNC 1 -# define _LIBCPP_AVAILABILITY_SYNC - -// This controls whether the library claims to provide a default verbose -// termination function, and consequently whether the headers will try -// to use it when the mechanism isn't overridden at compile-time. -# define _LIBCPP_AVAILABILITY_HAS_VERBOSE_ABORT 1 -# define _LIBCPP_AVAILABILITY_VERBOSE_ABORT - -// This controls the availability of the C++17 std::pmr library, -// which is implemented in large part in the built library. -# define _LIBCPP_AVAILABILITY_HAS_PMR 1 -# define _LIBCPP_AVAILABILITY_PMR - -// This controls the availability of the C++20 time zone database. -// The parser code is built in the library. -# define _LIBCPP_AVAILABILITY_HAS_TZDB 1 -# define _LIBCPP_AVAILABILITY_TZDB - -// This controls the availability of C++23 <print>, which -// has a dependency on the built library (it needs access to -// the underlying buffer types of std::cout, std::cerr, and std::clog). -# define _LIBCPP_AVAILABILITY_HAS_PRINT 1 -# define _LIBCPP_AVAILABILITY_PRINT - -// Enable additional explicit instantiations of iostreams components. This -// reduces the number of weak definitions generated in programs that use -// iostreams by providing a single strong definition in the shared library. -// -// TODO: Enable additional explicit instantiations on GCC once it supports exclude_from_explicit_instantiation, -// or once libc++ doesn't use the attribute anymore. -// TODO: Enable them on Windows once https://llvm.org/PR41018 has been fixed.
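A simplified sketch of how the HAS_ macros above feed the feature-test machinery; this shows the shape of the generated code, not the verbatim output of generate_feature_test_macro_components.py:

// <version> only advertises a feature when the deployment target can run it.
#if _LIBCPP_STD_VER >= 17
#  if _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT
#    define __cpp_lib_to_chars 201611L
#  endif
#endif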
-# if !defined(_LIBCPP_COMPILER_GCC) && !defined(_WIN32) -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 1 -# else -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 0 -# endif - -#elif defined(__APPLE__) - -# define _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS \ - (!defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) || __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 50000) - -# define _LIBCPP_AVAILABILITY_HAS_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_HAS_BAD_ANY_CAST _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS - -# define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS __attribute__((availability(watchos, strict, introduced = 5.0))) -# define _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_BAD_ANY_CAST _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS - -// TODO: Update once this is released -# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 0 -# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION __attribute__((unavailable)) - -// -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY 1 -# endif -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY \ - __attribute__((availability(macos, strict, introduced = 10.15))) \ - __attribute__((availability(ios, strict, introduced = 13.0))) \ - __attribute__((availability(tvos, strict, introduced = 13.0))) \ - __attribute__((availability(watchos, strict, introduced = 6.0))) -// clang-format off -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_PUSH \ - _Pragma("clang attribute push(__attribute__((availability(macos,strict,introduced=10.15))), apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), apply_to=any(function,record))") -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_POP \ - _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") -// clang-format on - -// std::to_chars(floating-point) -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 130300) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 160300) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 160300) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 90300) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT 0 -# else -# define 
_LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT 1 -# endif -# define _LIBCPP_AVAILABILITY_TO_CHARS_FLOATING_POINT \ - __attribute__((availability(macos, strict, introduced = 13.3))) \ - __attribute__((availability(ios, strict, introduced = 16.3))) \ - __attribute__((availability(tvos, strict, introduced = 16.3))) \ - __attribute__((availability(watchos, strict, introduced = 9.3))) - -// c++20 synchronization library -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 140000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 140000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 70000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_SYNC 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_SYNC 1 -# endif -# define _LIBCPP_AVAILABILITY_SYNC \ - __attribute__((availability(macos, strict, introduced = 11.0))) \ - __attribute__((availability(ios, strict, introduced = 14.0))) \ - __attribute__((availability(tvos, strict, introduced = 14.0))) \ - __attribute__((availability(watchos, strict, introduced = 7.0))) - -// __libcpp_verbose_abort -// TODO: Update once this is released -# define _LIBCPP_AVAILABILITY_HAS_VERBOSE_ABORT 0 - -# define _LIBCPP_AVAILABILITY_VERBOSE_ABORT __attribute__((unavailable)) - -// std::pmr -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 140000) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 170000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 170000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 100000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_PMR 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_PMR 1 -# endif -// TODO: Enable std::pmr markup once https://github.com/llvm/llvm-project/issues/40340 has been fixed -// Until then, it is possible for folks to try to use `std::pmr` when back-deploying to targets that don't support -// it and it'll be a load-time error, but we don't have a good alternative because the library won't compile if we -// use availability annotations until that bug has been fixed. -# if 0 -# define _LIBCPP_AVAILABILITY_PMR \ - __attribute__((availability(macos, strict, introduced = 14.0))) \ - __attribute__((availability(ios, strict, introduced = 17.0))) \ - __attribute__((availability(tvos, strict, introduced = 17.0))) \ - __attribute__((availability(watchos, strict, introduced = 10.0))) -# else -# define _LIBCPP_AVAILABILITY_PMR -# endif - -# define _LIBCPP_AVAILABILITY_HAS_TZDB 0 -# define _LIBCPP_AVAILABILITY_TZDB __attribute__((unavailable)) - -// Warning: This availability macro works differently than the other macros. -// The dylib part of print is not needed on Apple platforms. Therefore when -// the macro is not available the code calling the dylib is commented out. -// The macro _LIBCPP_AVAILABILITY_PRINT is not used. 
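To make the effect of those strict `introduced` attributes concrete, consider a hypothetical user translation unit; the diagnostic text is illustrative, not verbatim Clang output:

// Built with: clang++ -std=c++20 -mmacosx-version-min=10.15 test.cpp
#include <semaphore>

std::binary_semaphore sem{0};
// error: 'counting_semaphore<1>' is unavailable: introduced in macOS 11.0
// The _LIBCPP_AVAILABILITY_SYNC attribute turns the would-be load-time
// failure into this compile-time error, which is the point of the markup.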
-# define _LIBCPP_AVAILABILITY_HAS_PRINT 0 -# define _LIBCPP_AVAILABILITY_PRINT __attribute__((unavailable)) - -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 150000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 150000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 80000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 1 -# endif -#else - -// ...New vendors can add availability markup here... - -# error \ - "It looks like you're trying to enable vendor availability markup, but you haven't defined the corresponding macros yet!" - -#endif - -// Define availability attributes that depend on _LIBCPP_HAS_NO_EXCEPTIONS. -// Those are defined in terms of the availability attributes above, and -// should not be vendor-specific. -#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) -# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST -# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS -#else -# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCPP_AVAILABILITY_BAD_ANY_CAST -# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS -#endif - -// Define availability attributes that depend on both -// _LIBCPP_HAS_NO_EXCEPTIONS and _LIBCPP_HAS_NO_RTTI. 
-#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) || defined(_LIBCPP_HAS_NO_RTTI) -# undef _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION -# undef _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION -# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 0 -# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION -#endif - -#endif // _LIBCPP___AVAILABILITY diff --git a/lib/libcxx/include/__bit/bit_cast.h b/lib/libcxx/include/__bit/bit_cast.h index f20b39ae74..cd04567381 100644 --- a/lib/libcxx/include/__bit/bit_cast.h +++ b/lib/libcxx/include/__bit/bit_cast.h @@ -19,12 +19,21 @@ _LIBCPP_BEGIN_NAMESPACE_STD +#ifndef _LIBCPP_CXX03_LANG + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr _ToType __bit_cast(const _FromType& __from) noexcept { + return __builtin_bit_cast(_ToType, __from); +} + +#endif // _LIBCPP_CXX03_LANG + #if _LIBCPP_STD_VER >= 20 template requires(sizeof(_ToType) == sizeof(_FromType) && is_trivially_copyable_v<_ToType> && is_trivially_copyable_v<_FromType>) -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _ToType bit_cast(const _FromType& __from) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _ToType bit_cast(const _FromType& __from) noexcept { return __builtin_bit_cast(_ToType, __from); } diff --git a/lib/libcxx/include/__bit/bit_ceil.h b/lib/libcxx/include/__bit/bit_ceil.h index 77fa739503..cfd792dc2e 100644 --- a/lib/libcxx/include/__bit/bit_ceil.h +++ b/lib/libcxx/include/__bit/bit_ceil.h @@ -24,7 +24,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp __bit_ceil(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp __bit_ceil(_Tp __t) noexcept { if (__t < 2) return 1; const unsigned __n = numeric_limits<_Tp>::digits - std::__countl_zero((_Tp)(__t - 1u)); @@ -42,7 +42,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp __bit_ceil(_Tp __t) no # if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_ceil(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_ceil(_Tp __t) noexcept { return std::__bit_ceil(__t); } diff --git a/lib/libcxx/include/__bit/bit_floor.h b/lib/libcxx/include/__bit/bit_floor.h index cf5cf5803a..133e369504 100644 --- a/lib/libcxx/include/__bit/bit_floor.h +++ b/lib/libcxx/include/__bit/bit_floor.h @@ -23,7 +23,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_floor(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_floor(_Tp __t) noexcept { return __t == 0 ? 0 : _Tp{1} << std::__bit_log2(__t); } diff --git a/lib/libcxx/include/__bit/bit_width.h b/lib/libcxx/include/__bit/bit_width.h index a2020a0142..853e481776 100644 --- a/lib/libcxx/include/__bit/bit_width.h +++ b/lib/libcxx/include/__bit/bit_width.h @@ -22,7 +22,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int bit_width(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int bit_width(_Tp __t) noexcept { return __t == 0 ? 
0 : std::__bit_log2(__t) + 1; } diff --git a/lib/libcxx/include/__bit/byteswap.h b/lib/libcxx/include/__bit/byteswap.h index 20045d6fd4..6225ecf2f9 100644 --- a/lib/libcxx/include/__bit/byteswap.h +++ b/lib/libcxx/include/__bit/byteswap.h @@ -23,7 +23,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 23 template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp byteswap(_Tp __val) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp byteswap(_Tp __val) noexcept { if constexpr (sizeof(_Tp) == 1) { return __val; } else if constexpr (sizeof(_Tp) == 2) { diff --git a/lib/libcxx/include/__bit/countl.h b/lib/libcxx/include/__bit/countl.h index 396cfc2c3f..998a0b44c1 100644 --- a/lib/libcxx/include/__bit/countl.h +++ b/lib/libcxx/include/__bit/countl.h @@ -6,6 +6,9 @@ // //===----------------------------------------------------------------------===// +// TODO: __builtin_clzg is available since Clang 19 and GCC 14. When support for older versions is dropped, we can +// refactor this code to exclusively use __builtin_clzg. + #ifndef _LIBCPP___BIT_COUNTL_H #define _LIBCPP___BIT_COUNTL_H @@ -38,6 +41,9 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_cl #ifndef _LIBCPP_HAS_NO_INT128 inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_clz(__uint128_t __x) _NOEXCEPT { +# if __has_builtin(__builtin_clzg) + return __builtin_clzg(__x); +# else // The function is written in this form due to C++ constexpr limitations. // The algorithm: // - Test whether any bit in the high 64-bits is set @@ -49,12 +55,16 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_clz(__uint128_t __x) // zeros in the high 64-bits. return ((__x >> 64) == 0) ? (64 + __builtin_clzll(static_cast(__x))) : __builtin_clzll(static_cast(__x >> 64)); +# endif } #endif // _LIBCPP_HAS_NO_INT128 template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countl_zero(_Tp __t) _NOEXCEPT { static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countl_zero requires an unsigned integer type"); +#if __has_builtin(__builtin_clzg) + return __builtin_clzg(__t, numeric_limits<_Tp>::digits); +#else // __has_builtin(__builtin_clzg) if (__t == 0) return numeric_limits<_Tp>::digits; @@ -79,17 +89,18 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countl_zero(_Tp __t) _ } return __ret + __iter; } +#endif // __has_builtin(__builtin_clzg) } #if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countl_zero(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countl_zero(_Tp __t) noexcept { return std::__countl_zero(__t); } template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countl_one(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countl_one(_Tp __t) noexcept { return __t != numeric_limits<_Tp>::max() ? std::countl_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; } diff --git a/lib/libcxx/include/__bit/countr.h b/lib/libcxx/include/__bit/countr.h index 0cc679f87a..9e92021fba 100644 --- a/lib/libcxx/include/__bit/countr.h +++ b/lib/libcxx/include/__bit/countr.h @@ -6,6 +6,9 @@ // //===----------------------------------------------------------------------===// +// TODO: __builtin_ctzg is available since Clang 19 and GCC 14. When support for older versions is dropped, we can +// refactor this code to exclusively use __builtin_ctzg. 
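The clz/ctz/popcount TODOs above and below share one dispatch pattern: prefer the type-generic builtin when the compiler provides it, otherwise fall back to the widened classic builtin. A standalone sketch of that pattern (the helper name __clz_sketch is invented here, and the fallback branch is only valid for unsigned types no wider than unsigned int):

#include <limits>

template <class _Tp>
constexpr int __clz_sketch(_Tp __t) noexcept {
#if __has_builtin(__builtin_clzg)
  // The type-generic builtin (Clang 19+, GCC 14+) takes a fallback result,
  // so the __t == 0 case needs no separate branch.
  return __builtin_clzg(__t, std::numeric_limits<_Tp>::digits);
#else
  if (__t == 0)
    return std::numeric_limits<_Tp>::digits;
  // The classic builtin counts within unsigned int; subtract the width the
  // narrower type does not actually have.
  return __builtin_clz(static_cast<unsigned int>(__t)) -
         (std::numeric_limits<unsigned int>::digits - std::numeric_limits<_Tp>::digits);
#endif
}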
+ #ifndef _LIBCPP___BIT_COUNTR_H #define _LIBCPP___BIT_COUNTR_H @@ -35,13 +38,13 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_ct return __builtin_ctzll(__x); } -#if _LIBCPP_STD_VER >= 20 - -template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) noexcept { +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countr_zero(_Tp __t) _NOEXCEPT { +#if __has_builtin(__builtin_ctzg) + return __builtin_ctzg(__t, numeric_limits<_Tp>::digits); +#else // __has_builtin(__builtin_ctzg) if (__t == 0) return numeric_limits<_Tp>::digits; - if (sizeof(_Tp) <= sizeof(unsigned int)) return std::__libcpp_ctz(static_cast(__t)); else if (sizeof(_Tp) <= sizeof(unsigned long)) @@ -57,10 +60,18 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) n } return __ret + std::__libcpp_ctz(static_cast(__t)); } +#endif // __has_builtin(__builtin_ctzg) +} + +#if _LIBCPP_STD_VER >= 20 + +template <__libcpp_unsigned_integer _Tp> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) noexcept { + return std::__countr_zero(__t); } template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept { return __t != numeric_limits<_Tp>::max() ? std::countr_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; } diff --git a/lib/libcxx/include/__bit/has_single_bit.h b/lib/libcxx/include/__bit/has_single_bit.h index a4e178060a..52f5853a1b 100644 --- a/lib/libcxx/include/__bit/has_single_bit.h +++ b/lib/libcxx/include/__bit/has_single_bit.h @@ -24,7 +24,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool has_single_bit(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool has_single_bit(_Tp __t) noexcept { return __t != 0 && (((__t & (__t - 1)) == 0)); } diff --git a/lib/libcxx/include/__bit/popcount.h b/lib/libcxx/include/__bit/popcount.h index b0319cef25..5cf0a01d07 100644 --- a/lib/libcxx/include/__bit/popcount.h +++ b/lib/libcxx/include/__bit/popcount.h @@ -6,6 +6,9 @@ // //===----------------------------------------------------------------------===// +// TODO: __builtin_popcountg is available since Clang 19 and GCC 14. When support for older versions is dropped, we can +// refactor this code to exclusively use __builtin_popcountg. 
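Before the popcount.h hunk below, it helps to pin down the observable behavior the ctzg/popcountg rewrites must preserve; these checks follow directly from the C++20 <bit> specification:

#include <bit>
#include <cstdint>

static_assert(std::countr_zero(std::uint8_t{0}) == 8);    // zero input yields the digit count
static_assert(std::countr_zero(std::uint8_t{0x10}) == 4); // four trailing zero bits
static_assert(std::countr_one(std::uint8_t{0xFF}) == 8);  // matches countr_zero(~x)
static_assert(std::popcount(std::uint64_t{0xFF00FF00FF00FF00}) == 32);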
+ #ifndef _LIBCPP___BIT_POPCOUNT_H #define _LIBCPP___BIT_POPCOUNT_H @@ -38,7 +41,10 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_popcount(unsigned lo #if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int popcount(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int popcount(_Tp __t) noexcept { +# if __has_builtin(__builtin_popcountg) + return __builtin_popcountg(__t); +# else // __has_builtin(__builtin_popcountg) if (sizeof(_Tp) <= sizeof(unsigned int)) return std::__libcpp_popcount(static_cast(__t)); else if (sizeof(_Tp) <= sizeof(unsigned long)) @@ -53,6 +59,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int popcount(_Tp __t) noex } return __ret; } +# endif // __has_builtin(__builtin_popcountg) } #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__bit/rotate.h b/lib/libcxx/include/__bit/rotate.h index d848056c33..90e430e9d0 100644 --- a/lib/libcxx/include/__bit/rotate.h +++ b/lib/libcxx/include/__bit/rotate.h @@ -20,24 +20,37 @@ _LIBCPP_BEGIN_NAMESPACE_STD +// Writing two full functions for rotl and rotr makes it easier for the compiler +// to optimize the code. On x86 this function becomes the ROL instruction and +// the rotr function becomes the ROR instruction. template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotr(_Tp __t, int __cnt) _NOEXCEPT { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotr requires an unsigned integer type"); - const unsigned int __dig = numeric_limits<_Tp>::digits; - if ((__cnt % __dig) == 0) - return __t; +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotl(_Tp __x, int __s) _NOEXCEPT { + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotl requires an unsigned integer type"); + const int __N = numeric_limits<_Tp>::digits; + int __r = __s % __N; - if (__cnt < 0) { - __cnt *= -1; - return (__t << (__cnt % __dig)) | (__t >> (__dig - (__cnt % __dig))); // rotr with negative __cnt is similar to rotl - } + if (__r == 0) + return __x; - return (__t >> (__cnt % __dig)) | (__t << (__dig - (__cnt % __dig))); + if (__r > 0) + return (__x << __r) | (__x >> (__N - __r)); + + return (__x >> -__r) | (__x << (__N + __r)); } template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotl(_Tp __t, int __cnt) _NOEXCEPT { - return std::__rotr(__t, -__cnt); +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotr(_Tp __x, int __s) _NOEXCEPT { + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotr requires an unsigned integer type"); + const int __N = numeric_limits<_Tp>::digits; + int __r = __s % __N; + + if (__r == 0) + return __x; + + if (__r > 0) + return (__x >> __r) | (__x << (__N - __r)); + + return (__x << -__r) | (__x >> (__N + __r)); } #if _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__bit_reference b/lib/libcxx/include/__bit_reference index 3a5339b72d..22637d4397 100644 --- a/lib/libcxx/include/__bit_reference +++ b/lib/libcxx/include/__bit_reference @@ -16,6 +16,7 @@ #include <__bit/countr.h> #include <__bit/invert_if.h> #include <__bit/popcount.h> +#include <__compare/ordering.h> #include <__config> #include <__fwd/bit_reference.h> #include <__iterator/iterator_traits.h> @@ -95,8 +96,8 @@ public: } private: - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_reference( - __storage_pointer __s, __storage_type __m) _NOEXCEPT + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_reference(__storage_pointer __s, 
__storage_type __m) _NOEXCEPT : __seg_(__s), __mask_(__m) {} }; @@ -149,6 +150,7 @@ public: using __container = typename _Cp::__self; _LIBCPP_HIDE_FROM_ABI __bit_const_reference(const __bit_const_reference&) = default; + __bit_const_reference& operator=(const __bit_const_reference&) = delete; _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_const_reference(const __bit_reference<_Cp>& __x) _NOEXCEPT : __seg_(__x.__seg_), @@ -163,69 +165,12 @@ public: } private: - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __bit_const_reference( - __storage_pointer __s, __storage_type __m) _NOEXCEPT + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR explicit __bit_const_reference(__storage_pointer __s, __storage_type __m) _NOEXCEPT : __seg_(__s), __mask_(__m) {} - - __bit_const_reference& operator=(const __bit_const_reference&) = delete; }; -// fill_n - -template -_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void -__fill_n(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) { - using _It = __bit_iterator<_Cp, false>; - using __storage_type = typename _It::__storage_type; - - const int __bits_per_word = _It::__bits_per_word; - // do first partial word - if (__first.__ctz_ != 0) { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = std::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - if (_FillVal) - *__first.__seg_ |= __m; - else - *__first.__seg_ &= ~__m; - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - __storage_type __nw = __n / __bits_per_word; - std::fill_n(std::__to_address(__first.__seg_), __nw, _FillVal ? static_cast<__storage_type>(-1) : 0); - __n -= __nw * __bits_per_word; - // do last partial word - if (__n > 0) { - __first.__seg_ += __nw; - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - if (_FillVal) - *__first.__seg_ |= __m; - else - *__first.__seg_ &= ~__m; - } -} - -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void -fill_n(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n, bool __value) { - if (__n > 0) { - if (__value) - std::__fill_n(__first, __n); - else - std::__fill_n(__first, __n); - } -} - -// fill - -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void -fill(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __last, bool __value) { - std::fill_n(__first, static_cast(__last - __first), __value); -} - // copy template @@ -969,6 +914,7 @@ public: return __x.__seg_ == __y.__seg_ && __x.__ctz_ == __y.__ctz_; } +#if _LIBCPP_STD_VER <= 17 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 friend bool operator!=(const __bit_iterator& __x, const __bit_iterator& __y) { return !(__x == __y); @@ -993,10 +939,22 @@ public: operator>=(const __bit_iterator& __x, const __bit_iterator& __y) { return !(__x < __y); } +#else // _LIBCPP_STD_VER <= 17 + _LIBCPP_HIDE_FROM_ABI constexpr friend strong_ordering + operator<=>(const __bit_iterator& __x, const __bit_iterator& __y) { + if (__x.__seg_ < __y.__seg_) + return strong_ordering::less; + + if (__x.__seg_ == __y.__seg_) + return __x.__ctz_ <=> __y.__ctz_; + + return strong_ordering::greater; + } +#endif // _LIBCPP_STD_VER <= 17 private: - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_iterator( - __storage_pointer __s, unsigned __ctz) _NOEXCEPT + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_iterator(__storage_pointer __s, unsigned __ctz) 
_NOEXCEPT : __seg_(__s), __ctz_(__ctz) {} @@ -1007,8 +965,10 @@ private: friend class __bit_iterator<_Cp, true>; template friend struct __bit_array; + template - _LIBCPP_CONSTEXPR_SINCE_CXX20 friend void __fill_n(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); + _LIBCPP_CONSTEXPR_SINCE_CXX20 friend void + __fill_n_bool(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); template _LIBCPP_CONSTEXPR_SINCE_CXX20 friend __bit_iterator<_Dp, false> __copy_aligned( @@ -1053,8 +1013,8 @@ private: _LIBCPP_CONSTEXPR_SINCE_CXX20 friend __bit_iterator<_Dp, _IC> __find_bool(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); template - friend typename __bit_iterator<_Dp, _IC>::difference_type _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - __count_bool(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); + friend typename __bit_iterator<_Dp, _IC>::difference_type _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX20 __count_bool(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); }; _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__charconv/chars_format.h b/lib/libcxx/include/__charconv/chars_format.h index 95faa29010..c76cebd5d1 100644 --- a/lib/libcxx/include/__charconv/chars_format.h +++ b/lib/libcxx/include/__charconv/chars_format.h @@ -39,20 +39,17 @@ inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format operator^(chars_format __x, return chars_format(std::__to_underlying(__x) ^ std::__to_underlying(__y)); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 chars_format& -operator&=(chars_format& __x, chars_format __y) { +inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format& operator&=(chars_format& __x, chars_format __y) { __x = __x & __y; return __x; } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 chars_format& -operator|=(chars_format& __x, chars_format __y) { +inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format& operator|=(chars_format& __x, chars_format __y) { __x = __x | __y; return __x; } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 chars_format& -operator^=(chars_format& __x, chars_format __y) { +inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format& operator^=(chars_format& __x, chars_format __y) { __x = __x ^ __y; return __x; } diff --git a/lib/libcxx/include/__charconv/from_chars_integral.h b/lib/libcxx/include/__charconv/from_chars_integral.h index e969cedb33..c1f033b37b 100644 --- a/lib/libcxx/include/__charconv/from_chars_integral.h +++ b/lib/libcxx/include/__charconv/from_chars_integral.h @@ -11,6 +11,7 @@ #define _LIBCPP___CHARCONV_FROM_CHARS_INTEGRAL_H #include <__algorithm/copy_n.h> +#include <__assert> #include <__charconv/from_chars_result.h> #include <__charconv/traits.h> #include <__config> diff --git a/lib/libcxx/include/__charconv/to_chars_base_10.h b/lib/libcxx/include/__charconv/to_chars_base_10.h index 0dee351521..c49f4f6797 100644 --- a/lib/libcxx/include/__charconv/to_chars_base_10.h +++ b/lib/libcxx/include/__charconv/to_chars_base_10.h @@ -11,6 +11,7 @@ #define _LIBCPP___CHARCONV_TO_CHARS_BASE_10_H #include <__algorithm/copy_n.h> +#include <__assert> #include <__charconv/tables.h> #include <__config> #include diff --git a/lib/libcxx/include/__charconv/to_chars_floating_point.h b/lib/libcxx/include/__charconv/to_chars_floating_point.h index 08720e1078..118f316b21 100644 --- a/lib/libcxx/include/__charconv/to_chars_floating_point.h +++ b/lib/libcxx/include/__charconv/to_chars_floating_point.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___CHARCONV_TO_CHARS_FLOATING_POINT_H #define 
_LIBCPP___CHARCONV_TO_CHARS_FLOATING_POINT_H -#include <__availability> #include <__charconv/chars_format.h> #include <__charconv/to_chars_result.h> #include <__config> diff --git a/lib/libcxx/include/__charconv/to_chars_integral.h b/lib/libcxx/include/__charconv/to_chars_integral.h index 40fbe334d8..0369f4dfb9 100644 --- a/lib/libcxx/include/__charconv/to_chars_integral.h +++ b/lib/libcxx/include/__charconv/to_chars_integral.h @@ -11,6 +11,7 @@ #define _LIBCPP___CHARCONV_TO_CHARS_INTEGRAL_H #include <__algorithm/copy_n.h> +#include <__assert> #include <__bit/countl.h> #include <__charconv/tables.h> #include <__charconv/to_chars_base_10.h> diff --git a/lib/libcxx/include/__charconv/traits.h b/lib/libcxx/include/__charconv/traits.h index b4907c3f77..c91c6da324 100644 --- a/lib/libcxx/include/__charconv/traits.h +++ b/lib/libcxx/include/__charconv/traits.h @@ -10,6 +10,7 @@ #ifndef _LIBCPP___CHARCONV_TRAITS #define _LIBCPP___CHARCONV_TRAITS +#include <__assert> #include <__bit/countl.h> #include <__charconv/tables.h> #include <__charconv/to_chars_base_10.h> diff --git a/lib/libcxx/include/__chrono/convert_to_tm.h b/lib/libcxx/include/__chrono/convert_to_tm.h index 1301cd6f1f..3a51019b80 100644 --- a/lib/libcxx/include/__chrono/convert_to_tm.h +++ b/lib/libcxx/include/__chrono/convert_to_tm.h @@ -16,10 +16,12 @@ #include <__chrono/duration.h> #include <__chrono/file_clock.h> #include <__chrono/hh_mm_ss.h> +#include <__chrono/local_info.h> #include <__chrono/month.h> #include <__chrono/month_weekday.h> #include <__chrono/monthday.h> #include <__chrono/statically_widen.h> +#include <__chrono/sys_info.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> #include <__chrono/weekday.h> @@ -27,11 +29,13 @@ #include <__chrono/year_month.h> #include <__chrono/year_month_day.h> #include <__chrono/year_month_weekday.h> +#include <__chrono/zoned_time.h> #include <__concepts/same_as.h> #include <__config> #include <__format/format_error.h> #include <__memory/addressof.h> #include <__type_traits/is_convertible.h> +#include <__type_traits/is_specialization.h> #include #include #include @@ -171,6 +175,18 @@ _LIBCPP_HIDE_FROM_ABI _Tm __convert_to_tm(const _ChronoT& __value) { if (__value.hours().count() > std::numeric_limits::max()) std::__throw_format_error("Formatting hh_mm_ss, encountered an hour overflow"); __result.tm_hour = __value.hours().count(); +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + } else if constexpr (same_as<_ChronoT, chrono::sys_info>) { + // Has no time information. + } else if constexpr (same_as<_ChronoT, chrono::local_info>) { + // Has no time information. 
+# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + } else if constexpr (__is_specialization_v<_ChronoT, chrono::zoned_time>) { + return std::__convert_to_tm<_Tm>( + chrono::sys_time{__value.get_local_time().time_since_epoch()}); +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) } else static_assert(sizeof(_ChronoT) == 0, "Add the missing type specialization"); diff --git a/lib/libcxx/include/__chrono/duration.h b/lib/libcxx/include/__chrono/duration.h index 5693ee6440..1e36d73428 100644 --- a/lib/libcxx/include/__chrono/duration.h +++ b/lib/libcxx/include/__chrono/duration.h @@ -391,8 +391,8 @@ operator<=>(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Perio template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR - typename common_type, duration<_Rep2, _Period2> >::type - operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +typename common_type, duration<_Rep2, _Period2> >::type +operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; return _Cd(_Cd(__lhs).count() + _Cd(__rhs).count()); } @@ -401,8 +401,8 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR - typename common_type, duration<_Rep2, _Period2> >::type - operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +typename common_type, duration<_Rep2, _Period2> >::type +operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; return _Cd(_Cd(__lhs).count() - _Cd(__rhs).count()); } @@ -412,7 +412,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR template ::type>::value, int> = 0> + __enable_if_t::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { typedef typename common_type<_Rep1, _Rep2>::type _Cr; @@ -423,7 +423,7 @@ operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { template ::type>::value, int> = 0> + __enable_if_t::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator*(const _Rep1& __s, const duration<_Rep2, _Period>& __d) { return __d * __s; @@ -435,7 +435,7 @@ template ::value && - is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + is_convertible::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator/(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { @@ -457,7 +457,7 @@ template ::value && - is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + is_convertible::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { @@ -468,8 +468,8 @@ operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR - typename common_type, duration<_Rep2, _Period2> >::type - operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +typename common_type, duration<_Rep2, _Period2> >::type +operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef typename common_type<_Rep1, _Rep2>::type 
_Cr; typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; return _Cd(static_cast<_Cr>(_Cd(__lhs).count()) % static_cast<_Cr>(_Cd(__rhs).count())); diff --git a/lib/libcxx/include/__chrono/exception.h b/lib/libcxx/include/__chrono/exception.h new file mode 100644 index 0000000000..266f8fac44 --- /dev/null +++ b/lib/libcxx/include/__chrono/exception.h @@ -0,0 +1,135 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_EXCEPTION_H +#define _LIBCPP___CHRONO_EXCEPTION_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/calendar.h> +# include <__chrono/local_info.h> +# include <__chrono/time_point.h> +# include <__config> +# include <__configuration/availability.h> +# include <__verbose_abort> +# include +# include +# include + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 + +namespace chrono { + +class nonexistent_local_time : public runtime_error { +public: + template + _LIBCPP_HIDE_FROM_ABI nonexistent_local_time(const local_time<_Duration>& __time, const local_info& __info) + : runtime_error{__create_message(__time, __info)} { + // [time.zone.exception.nonexist]/2 + // Preconditions: i.result == local_info::nonexistent is true. + // The value of __info.result is not used. 
+ _LIBCPP_ASSERT_PEDANTIC(__info.result == local_info::nonexistent, + "creating a nonexistent_local_time from a local_info that is not non-existent"); + } + + _LIBCPP_HIDE_FROM_ABI nonexistent_local_time(const nonexistent_local_time&) = default; + _LIBCPP_HIDE_FROM_ABI nonexistent_local_time& operator=(const nonexistent_local_time&) = default; + + _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI ~nonexistent_local_time() override; // exported as key function + +private: + template <class _Duration> + _LIBCPP_HIDE_FROM_ABI string __create_message(const local_time<_Duration>& __time, const local_info& __info) { + return std::format( + R"({} is in a gap between +{} {} and +{} {} which are both equivalent to +{} UTC)", + __time, + local_seconds{__info.first.end.time_since_epoch()} + __info.first.offset, + __info.first.abbrev, + local_seconds{__info.second.begin.time_since_epoch()} + __info.second.offset, + __info.second.abbrev, + __info.first.end); + } +}; + +template <class _Duration> +_LIBCPP_NORETURN _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI void __throw_nonexistent_local_time( + [[maybe_unused]] const local_time<_Duration>& __time, [[maybe_unused]] const local_info& __info) { +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS + throw nonexistent_local_time(__time, __info); +# else + _LIBCPP_VERBOSE_ABORT("nonexistent_local_time was thrown in -fno-exceptions mode"); +# endif +} + +class ambiguous_local_time : public runtime_error { +public: + template <class _Duration> + _LIBCPP_HIDE_FROM_ABI ambiguous_local_time(const local_time<_Duration>& __time, const local_info& __info) + : runtime_error{__create_message(__time, __info)} { + // [time.zone.exception.ambig]/2 + // Preconditions: i.result == local_info::ambiguous is true. + // The value of __info.result is not used. + _LIBCPP_ASSERT_PEDANTIC(__info.result == local_info::ambiguous, + "creating an ambiguous_local_time from a local_info that is not ambiguous"); + } + + _LIBCPP_HIDE_FROM_ABI ambiguous_local_time(const ambiguous_local_time&) = default; + _LIBCPP_HIDE_FROM_ABI ambiguous_local_time& operator=(const ambiguous_local_time&) = default; + + _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI ~ambiguous_local_time() override; // exported as key function + +private: + template <class _Duration> + _LIBCPP_HIDE_FROM_ABI string __create_message(const local_time<_Duration>& __time, const local_info& __info) { + return std::format( + // There are two spaces after the full-stop; this has been verified + // in the sources of the Standard. + R"({0} is ambiguous. 
It could be +{0} {1} == {2} UTC or +{0} {3} == {4} UTC)", + __time, + __info.first.abbrev, + __time - __info.first.offset, + __info.second.abbrev, + __time - __info.second.offset); + } +}; + +template +_LIBCPP_NORETURN _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI void __throw_ambiguous_local_time( + [[maybe_unused]] const local_time<_Duration>& __time, [[maybe_unused]] const local_info& __info) { +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS + throw ambiguous_local_time(__time, __info); +# else + _LIBCPP_VERBOSE_ABORT("ambiguous_local_time was thrown in -fno-exceptions mode"); +# endif +} + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_EXCEPTION_H diff --git a/lib/libcxx/include/__chrono/file_clock.h b/lib/libcxx/include/__chrono/file_clock.h index 7d25729fec..4dd3f88ce5 100644 --- a/lib/libcxx/include/__chrono/file_clock.h +++ b/lib/libcxx/include/__chrono/file_clock.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___CHRONO_FILE_CLOCK_H #define _LIBCPP___CHRONO_FILE_CLOCK_H -#include <__availability> #include <__chrono/duration.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> diff --git a/lib/libcxx/include/__chrono/formatter.h b/lib/libcxx/include/__chrono/formatter.h index 4ad59382a4..449c415e95 100644 --- a/lib/libcxx/include/__chrono/formatter.h +++ b/lib/libcxx/include/__chrono/formatter.h @@ -10,6 +10,7 @@ #ifndef _LIBCPP___CHRONO_FORMATTER_H #define _LIBCPP___CHRONO_FORMATTER_H +#include <__algorithm/ranges_copy.h> #include <__chrono/calendar.h> #include <__chrono/concepts.h> #include <__chrono/convert_to_tm.h> @@ -17,12 +18,14 @@ #include <__chrono/duration.h> #include <__chrono/file_clock.h> #include <__chrono/hh_mm_ss.h> +#include <__chrono/local_info.h> #include <__chrono/month.h> #include <__chrono/month_weekday.h> #include <__chrono/monthday.h> #include <__chrono/ostream.h> #include <__chrono/parser_std_format_spec.h> #include <__chrono/statically_widen.h> +#include <__chrono/sys_info.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> #include <__chrono/weekday.h> @@ -30,6 +33,7 @@ #include <__chrono/year_month.h> #include <__chrono/year_month_day.h> #include <__chrono/year_month_weekday.h> +#include <__chrono/zoned_time.h> #include <__concepts/arithmetic.h> #include <__concepts/same_as.h> #include <__config> @@ -41,8 +45,10 @@ #include <__format/parser_std_format_spec.h> #include <__format/write_escaped.h> #include <__memory/addressof.h> +#include <__type_traits/is_specialization.h> #include #include +#include #include #include @@ -79,12 +85,15 @@ namespace __formatter { // small). Therefore a duration uses its own conversion. template _LIBCPP_HIDE_FROM_ABI void -__format_sub_seconds(const chrono::duration<_Rep, _Period>& __value, basic_stringstream<_CharT>& __sstr) { +__format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::duration<_Rep, _Period>& __value) { __sstr << std::use_facet>(__sstr.getloc()).decimal_point(); using __duration = chrono::duration<_Rep, _Period>; auto __fraction = __value - chrono::duration_cast(__value); + // Converts a negative fraction to its positive value. 
+ if (__value < chrono::seconds{0} && __fraction != __duration{0}) + __fraction += chrono::seconds{1}; if constexpr (chrono::treat_as_floating_point_v<_Rep>) // When the floating-point value has digits itself they are ignored based // on the wording in [tab:time.format.spec] @@ -110,13 +119,13 @@ __format_sub_seconds(const chrono::duration<_Rep, _Period>& __value, basic_strin } template -_LIBCPP_HIDE_FROM_ABI void __format_sub_seconds(const _Tp& __value, basic_stringstream<_CharT>& __sstr) { - __formatter::__format_sub_seconds(__value.time_since_epoch(), __sstr); +_LIBCPP_HIDE_FROM_ABI void __format_sub_seconds(basic_stringstream<_CharT>& __sstr, const _Tp& __value) { + __formatter::__format_sub_seconds(__sstr, __value.time_since_epoch()); } template _LIBCPP_HIDE_FROM_ABI void -__format_sub_seconds(const chrono::hh_mm_ss<_Duration>& __value, basic_stringstream<_CharT>& __sstr) { +__format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::hh_mm_ss<_Duration>& __value) { __sstr << std::use_facet>(__sstr.getloc()).decimal_point(); if constexpr (chrono::treat_as_floating_point_v) std::format_to(std::ostreambuf_iterator<_CharT>{__sstr}, @@ -130,10 +139,24 @@ __format_sub_seconds(const chrono::hh_mm_ss<_Duration>& __value, basic_stringstr __value.fractional_width); } +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ + !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) +template +_LIBCPP_HIDE_FROM_ABI void +__format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::zoned_time<_Duration, _TimeZonePtr>& __value) { + __formatter::__format_sub_seconds(__sstr, __value.get_local_time().time_since_epoch()); +} +# endif + template consteval bool __use_fraction() { if constexpr (__is_time_point<_Tp>) return chrono::hh_mm_ss::fractional_width; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ + !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return chrono::hh_mm_ss::fractional_width; +# endif else if constexpr (chrono::__is_duration<_Tp>::value) return chrono::hh_mm_ss<_Tp>::fractional_width; else if constexpr (__is_hh_mm_ss<_Tp>) @@ -143,7 +166,7 @@ consteval bool __use_fraction() { } template -_LIBCPP_HIDE_FROM_ABI void __format_year(int __year, basic_stringstream<_CharT>& __sstr) { +_LIBCPP_HIDE_FROM_ABI void __format_year(basic_stringstream<_CharT>& __sstr, int __year) { if (__year < 0) { __sstr << _CharT('-'); __year = -__year; @@ -159,7 +182,7 @@ _LIBCPP_HIDE_FROM_ABI void __format_year(int __year, basic_stringstream<_CharT>& } template -_LIBCPP_HIDE_FROM_ABI void __format_century(int __year, basic_stringstream<_CharT>& __sstr) { +_LIBCPP_HIDE_FROM_ABI void __format_century(basic_stringstream<_CharT>& __sstr, int __year) { // TODO FMT Write an issue // [tab:time.format.spec] // %C The year divided by 100 using floored division. If the result is a @@ -170,10 +193,56 @@ _LIBCPP_HIDE_FROM_ABI void __format_century(int __year, basic_stringstream<_Char __sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), __century); } +// Implements the %z format specifier according to [tab:time.format.spec], where +// '__modifier' signals %Oz or %Ez were used. (Both modifiers behave the same, +// so there is no need to distinguish between them.) 
+template +_LIBCPP_HIDE_FROM_ABI void +__format_zone_offset(basic_stringstream<_CharT>& __sstr, chrono::seconds __offset, bool __modifier) { + if (__offset < 0s) { + __sstr << _CharT('-'); + __offset = -__offset; + } else { + __sstr << _CharT('+'); + } + + chrono::hh_mm_ss __hms{__offset}; + std::ostreambuf_iterator<_CharT> __out_it{__sstr}; + // Note HMS does not allow formatting hours > 23, but the offset is not limited to 24H. + std::format_to(__out_it, _LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), __hms.hours().count()); + if (__modifier) + __sstr << _CharT(':'); + std::format_to(__out_it, _LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), __hms.minutes().count()); +} + +// Helper to store the time zone information needed for formatting. +struct _LIBCPP_HIDE_FROM_ABI __time_zone { + // Typically these abbreviations are short and fit in the string's internal + // buffer. + string __abbrev; + chrono::seconds __offset; +}; + +template +_LIBCPP_HIDE_FROM_ABI __time_zone __convert_to_time_zone([[maybe_unused]] const _Tp& __value) { +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + if constexpr (same_as<_Tp, chrono::sys_info>) + return {__value.abbrev, __value.offset}; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return __formatter::__convert_to_time_zone(__value.get_info()); +# endif + else +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + return {"UTC", chrono::seconds{0}}; +} + template _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( - const _Tp& __value, basic_stringstream<_CharT>& __sstr, basic_string_view<_CharT> __chrono_specs) { + basic_stringstream<_CharT>& __sstr, const _Tp& __value, basic_string_view<_CharT> __chrono_specs) { tm __t = std::__convert_to_tm(__value); + __time_zone __z = __formatter::__convert_to_time_zone(__value); const auto& __facet = std::use_facet>(__sstr.getloc()); for (auto __it = __chrono_specs.begin(); __it != __chrono_specs.end(); ++__it) { if (*__it == _CharT('%')) { @@ -196,7 +265,7 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( // strftime's output is only defined in the range [00, 99]. int __year = __t.tm_year + 1900; if (__year < 1000 || __year > 9999) - __formatter::__format_century(__year, __sstr); + __formatter::__format_century(__sstr, __year); else __facet.put( {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); @@ -242,7 +311,7 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( __facet.put( {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); if constexpr (__use_fraction<_Tp>()) - __formatter::__format_sub_seconds(__value, __sstr); + __formatter::__format_sub_seconds(__sstr, __value); break; // Unlike time_put and strftime the formatting library requires %Y @@ -283,22 +352,24 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( // Depending on the platform's libc the range of supported years is // limited. Intead of of testing all conditions use the internal // implementation unconditionally. 
- __formatter::__format_year(__t.tm_year + 1900, __sstr); + __formatter::__format_year(__sstr, __t.tm_year + 1900); break; - case _CharT('F'): { - int __year = __t.tm_year + 1900; - if (__year < 1000) { - __formatter::__format_year(__year, __sstr); - __sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "-{:02}-{:02}"), __t.tm_mon + 1, __t.tm_mday); - } else - __facet.put( - {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); - } break; + case _CharT('F'): + // Depending on the platform's libc the range of supported years is + // limited. Instead of testing all conditions use the internal + // implementation unconditionally. + __formatter::__format_year(__sstr, __t.tm_year + 1900); + __sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "-{:02}-{:02}"), __t.tm_mon + 1, __t.tm_mday); + break; + + case _CharT('z'): + __formatter::__format_zone_offset(__sstr, __z.__offset, false); + break; case _CharT('Z'): - // TODO FMT Add proper timezone support. - __sstr << _LIBCPP_STATICALLY_WIDEN(_CharT, "UTC"); + // __abbrev is always a char so the copy may convert. + ranges::copy(__z.__abbrev, std::ostreambuf_iterator<_CharT>{__sstr}); break; case _CharT('O'): @@ -310,13 +381,19 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( ++__it; __facet.put( {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); - __formatter::__format_sub_seconds(__value, __sstr); + __formatter::__format_sub_seconds(__sstr, __value); break; } } + + // Oz produces the same output as Ez below. [[fallthrough]]; case _CharT('E'): ++__it; + if (*__it == 'z') { + __formatter::__format_zone_offset(__sstr, __z.__offset, true); + break; + } [[fallthrough]]; default: __facet.put( @@ -365,6 +442,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __weekday_ok(const _Tp& __value) { return __value.weekday().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -405,6 +493,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __weekday_name_ok(const _Tp& __value) { return __value.weekday().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -445,6 +544,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __date_ok(const _Tp& __value) { return __value.ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, 
chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -485,6 +595,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __month_name_ok(const _Tp& __value) { return __value.month().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -510,9 +631,16 @@ __format_chrono(const _Tp& __value, __sstr << __value; else { if constexpr (chrono::__is_duration<_Tp>::value) { - if (__value < __value.zero()) - __sstr << _CharT('-'); - __formatter::__format_chrono_using_chrono_specs(chrono::abs(__value), __sstr, __chrono_specs); + // A duration can be a user defined arithmetic type. Users may specialize + // numeric_limits, but they may not specialize is_signed. + if constexpr (numeric_limits::is_signed) { + if (__value < __value.zero()) { + __sstr << _CharT('-'); + __formatter::__format_chrono_using_chrono_specs(__sstr, -__value, __chrono_specs); + } else + __formatter::__format_chrono_using_chrono_specs(__sstr, __value, __chrono_specs); + } else + __formatter::__format_chrono_using_chrono_specs(__sstr, __value, __chrono_specs); // TODO FMT When keeping the precision it will truncate the string. // Note that the behaviour what the precision does isn't specified. __specs.__precision_ = -1; @@ -556,7 +684,7 @@ __format_chrono(const _Tp& __value, __sstr << _CharT('-'); } - __formatter::__format_chrono_using_chrono_specs(__value, __sstr, __chrono_specs); + __formatter::__format_chrono_using_chrono_specs(__sstr, __value, __chrono_specs); } } @@ -814,6 +942,47 @@ public: return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__time); } }; + +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +template <__fmt_char_type _CharT> +struct formatter : public __formatter_chrono<_CharT> { +public: + using _Base = __formatter_chrono<_CharT>; + + template + _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator parse(_ParseContext& __ctx) { + return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__time_zone); + } +}; + +template <__fmt_char_type _CharT> +struct formatter : public __formatter_chrono<_CharT> { +public: + using _Base = __formatter_chrono<_CharT>; + + template + _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator parse(_ParseContext& __ctx) { + return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags{}); + } +}; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) +// Note due to how libc++'s formatters are implemented there is no need to add +// the exposition only local-time-format-t abstraction. 
+template <class _Duration, __fmt_char_type _CharT>
+struct formatter<chrono::zoned_time<_Duration, const chrono::time_zone*>, _CharT> : public __formatter_chrono<_CharT> {
+public:
+  using _Base = __formatter_chrono<_CharT>;
+
+  template <class _ParseContext>
+  _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator parse(_ParseContext& __ctx) {
+    return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__clock);
+  }
+};
+#  endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) &&
+         // !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+#  endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
 #endif // if _LIBCPP_STD_VER >= 20
 
 _LIBCPP_END_NAMESPACE_STD
diff --git a/lib/libcxx/include/__chrono/leap_second.h b/lib/libcxx/include/__chrono/leap_second.h
new file mode 100644
index 0000000000..1a0e7f3107
--- /dev/null
+++ b/lib/libcxx/include/__chrono/leap_second.h
@@ -0,0 +1,126 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html
+
+#ifndef _LIBCPP___CHRONO_LEAP_SECOND_H
+#define _LIBCPP___CHRONO_LEAP_SECOND_H
+
+#include <version>
+// Enable the contents of the header only when libc++ was built with experimental features enabled.
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#  include <__chrono/duration.h>
+#  include <__chrono/system_clock.h>
+#  include <__chrono/time_point.h>
+#  include <__compare/ordering.h>
+#  include <__compare/three_way_comparable.h>
+#  include <__config>
+#  include <__utility/private_constructor_tag.h>
+
+#  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#    pragma GCC system_header
+#  endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#  if _LIBCPP_STD_VER >= 20
+
+namespace chrono {
+
+class leap_second {
+public:
+  [[nodiscard]]
+  _LIBCPP_HIDE_FROM_ABI explicit constexpr leap_second(__private_constructor_tag, sys_seconds __date, seconds __value)
+      : __date_(__date), __value_(__value) {}
+
+  _LIBCPP_HIDE_FROM_ABI leap_second(const leap_second&) = default;
+  _LIBCPP_HIDE_FROM_ABI leap_second& operator=(const leap_second&) = default;
+
+  _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr sys_seconds date() const noexcept { return __date_; }
+
+  _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr seconds value() const noexcept { return __value_; }
+
+private:
+  sys_seconds __date_;
+  seconds __value_;
+};
+
+_LIBCPP_HIDE_FROM_ABI inline constexpr bool operator==(const leap_second& __x, const leap_second& __y) {
+  return __x.date() == __y.date();
+}
+
+_LIBCPP_HIDE_FROM_ABI inline constexpr strong_ordering operator<=>(const leap_second& __x, const leap_second& __y) {
+  return __x.date() <=> __y.date();
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator==(const leap_second& __x, const sys_time<_Duration>& __y) {
+  return __x.date() == __y;
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator<(const leap_second& __x, const sys_time<_Duration>& __y) {
+  return __x.date() < __y;
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator<(const sys_time<_Duration>& __x, const leap_second& __y) {
+  return __x < __y.date();
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator>(const leap_second& __x, const sys_time<_Duration>& __y) {
+  return __y < __x;
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool
+operator>(const sys_time<_Duration>& __x, const leap_second& __y) {
+  return __y < __x;
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator<=(const leap_second& __x, const sys_time<_Duration>& __y) {
+  return !(__y < __x);
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator<=(const sys_time<_Duration>& __x, const leap_second& __y) {
+  return !(__y < __x);
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator>=(const leap_second& __x, const sys_time<_Duration>& __y) {
+  return !(__x < __y);
+}
+
+template <class _Duration>
+_LIBCPP_HIDE_FROM_ABI constexpr bool operator>=(const sys_time<_Duration>& __x, const leap_second& __y) {
+  return !(__x < __y);
+}
+
+#  ifndef _LIBCPP_COMPILER_GCC
+// This requirement cause a compilation loop in GCC-13 and running out of memory.
+// TODO TZDB Test whether GCC-14 fixes this.
+template <class _Duration>
+  requires three_way_comparable_with<sys_seconds, sys_time<_Duration>>
+_LIBCPP_HIDE_FROM_ABI constexpr auto operator<=>(const leap_second& __x, const sys_time<_Duration>& __y) {
+  return __x.date() <=> __y;
+}
+#  endif
+
+} // namespace chrono
+
+#  endif //_LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#endif // _LIBCPP___CHRONO_LEAP_SECOND_H
diff --git a/lib/libcxx/include/__chrono/local_info.h b/lib/libcxx/include/__chrono/local_info.h
new file mode 100644
index 0000000000..cfe1448904
--- /dev/null
+++ b/lib/libcxx/include/__chrono/local_info.h
@@ -0,0 +1,50 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html
+
+#ifndef _LIBCPP___CHRONO_LOCAL_INFO_H
+#define _LIBCPP___CHRONO_LOCAL_INFO_H
+
+#include <version>
+// Enable the contents of the header only when libc++ was built with experimental features enabled.
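// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: leap_second
// orders only by its date(), so the heterogeneous operators above let the
// sorted tzdb leap-second list be searched with a plain sys_time. Assumes a
// libc++ build with the experimental tzdb enabled; the helper name is ours.
#include <algorithm>
#include <chrono>

std::chrono::seconds leap_seconds_before(std::chrono::sys_seconds tp) {
  const auto& list = std::chrono::get_tzdb().leap_seconds;
  // operator<(leap_second, sys_time) is what makes lower_bound well-formed here.
  auto it = std::lower_bound(list.begin(), list.end(), tp);
  std::chrono::seconds sum{0};
  for (auto first = list.begin(); first != it; ++first)
    sum += first->value(); // each entry contributes +1s or -1s
  return sum;
}
// ---------------------------------------------------------------------------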
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#  include <__chrono/sys_info.h>
+#  include <__config>
+
+#  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#    pragma GCC system_header
+#  endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#  if _LIBCPP_STD_VER >= 20
+
+namespace chrono {
+
+struct local_info {
+  static constexpr int unique = 0;
+  static constexpr int nonexistent = 1;
+  static constexpr int ambiguous = 2;
+
+  int result;
+  sys_info first;
+  sys_info second;
+};
+
+} // namespace chrono
+
+#  endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#endif // _LIBCPP___CHRONO_LOCAL_INFO_H
diff --git a/lib/libcxx/include/__chrono/ostream.h b/lib/libcxx/include/__chrono/ostream.h
index b687ef8059..e6c43254ee 100644
--- a/lib/libcxx/include/__chrono/ostream.h
+++ b/lib/libcxx/include/__chrono/ostream.h
@@ -15,20 +15,23 @@
 #include <__chrono/duration.h>
 #include <__chrono/file_clock.h>
 #include <__chrono/hh_mm_ss.h>
+#include <__chrono/local_info.h>
 #include <__chrono/month.h>
 #include <__chrono/month_weekday.h>
 #include <__chrono/monthday.h>
 #include <__chrono/statically_widen.h>
+#include <__chrono/sys_info.h>
 #include <__chrono/system_clock.h>
 #include <__chrono/weekday.h>
 #include <__chrono/year.h>
 #include <__chrono/year_month.h>
 #include <__chrono/year_month_day.h>
 #include <__chrono/year_month_weekday.h>
+#include <__chrono/zoned_time.h>
 #include <__concepts/same_as.h>
 #include <__config>
 #include <__format/format_functions.h>
-#include <ostream>
+#include <__fwd/ostream.h>
 #include <sstream>
 
 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -262,6 +265,54 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const hh_mm_ss<_Duration> __hms
   return __os << std::format(__os.getloc(), _LIBCPP_STATICALLY_WIDEN(_CharT, "{:L%T}"), __hms);
 }
 
+#  if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+template <class _CharT, class _Traits>
+_LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>&
+operator<<(basic_ostream<_CharT, _Traits>& __os, const sys_info& __info) {
+  // __info.abbrev is always std::basic_string<char>.
+  // Since these strings typically are short the conversion should be cheap.
+  std::basic_string<_CharT> __abbrev{__info.abbrev.begin(), __info.abbrev.end()};
+  return __os << std::format(
+             _LIBCPP_STATICALLY_WIDEN(_CharT, "[{:%F %T}, {:%F %T}) {:%T} {:%Q%q} \"{}\""),
+             __info.begin,
+             __info.end,
+             hh_mm_ss{__info.offset},
+             __info.save,
+             __abbrev);
+}
+
+template <class _CharT, class _Traits>
+_LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>&
+operator<<(basic_ostream<_CharT, _Traits>& __os, const local_info& __info) {
+  auto __result = [&]() -> basic_string<_CharT> {
+    switch (__info.result) {
+    case local_info::unique:
+      return _LIBCPP_STATICALLY_WIDEN(_CharT, "unique");
+    case local_info::nonexistent:
+      return _LIBCPP_STATICALLY_WIDEN(_CharT, "non-existent");
+    case local_info::ambiguous:
+      return _LIBCPP_STATICALLY_WIDEN(_CharT, "ambiguous");
+
+    default:
+      return std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "unspecified result ({})"), __info.result);
+    };
+  };
+
+  return __os << std::format(
+             _LIBCPP_STATICALLY_WIDEN(_CharT, "{}: {{{}, {}}}"), __result(), __info.first, __info.second);
+}
+
+#  if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
+      !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+template <class _CharT, class _Traits, class _Duration, class _TimeZonePtr>
+_LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>&
+operator<<(basic_ostream<_CharT, _Traits>& __os, const zoned_time<_Duration, _TimeZonePtr>& __tp) {
+  return __os << std::format(__os.getloc(), _LIBCPP_STATICALLY_WIDEN(_CharT, "{:L%F %T %Z}"), __tp);
+}
+#  endif
+#  endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
 } // namespace chrono
 
 #endif // if _LIBCPP_STD_VER >= 20
diff --git a/lib/libcxx/include/__chrono/sys_info.h b/lib/libcxx/include/__chrono/sys_info.h
new file mode 100644
index 0000000000..11536cbde3
--- /dev/null
+++ b/lib/libcxx/include/__chrono/sys_info.h
@@ -0,0 +1,51 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html
+
+#ifndef _LIBCPP___CHRONO_SYS_INFO_H
+#define _LIBCPP___CHRONO_SYS_INFO_H
+
+#include <version>
+// Enable the contents of the header only when libc++ was built with experimental features enabled.
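// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: with the
// operator<< overloads above, sys_info and local_info are directly
// streamable, and a zoned_time prints as "{:L%F %T %Z}". The zone name is an
// arbitrary example; requires a build with the experimental tzdb enabled.
#include <chrono>
#include <iostream>

int main() {
  using namespace std::chrono;
  zoned_time zt{"Europe/Amsterdam", system_clock::now()};
  std::cout << zt << '\n';            // e.g. "2024-10-27 12:34:56.789 CET"
  std::cout << zt.get_info() << '\n'; // sys_info: "[begin, end) offset save "CET""
}
// ---------------------------------------------------------------------------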
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#  include <__chrono/duration.h>
+#  include <__chrono/system_clock.h>
+#  include <__chrono/time_point.h>
+#  include <__config>
+#  include <string>
+
+#  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#    pragma GCC system_header
+#  endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#  if _LIBCPP_STD_VER >= 20
+
+namespace chrono {
+
+struct sys_info {
+  sys_seconds begin;
+  sys_seconds end;
+  seconds offset;
+  minutes save;
+  string abbrev;
+};
+
+} // namespace chrono
+
+#  endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#endif // _LIBCPP___CHRONO_SYS_INFO_H
diff --git a/lib/libcxx/include/__chrono/time_point.h b/lib/libcxx/include/__chrono/time_point.h
index e65253ddb9..aaf0b098f2 100644
--- a/lib/libcxx/include/__chrono/time_point.h
+++ b/lib/libcxx/include/__chrono/time_point.h
@@ -78,7 +78,7 @@ public:
 
 template <class _Clock, class _Duration1, class _Duration2>
 struct _LIBCPP_TEMPLATE_VIS
-    common_type<chrono::time_point<_Clock, _Duration1>, chrono::time_point<_Clock, _Duration2> > {
+common_type<chrono::time_point<_Clock, _Duration1>, chrono::time_point<_Clock, _Duration2> > {
   typedef chrono::time_point<_Clock, typename common_type<_Duration1, _Duration2>::type> type;
 };
 
@@ -92,25 +92,22 @@ time_point_cast(const time_point<_Clock, _Duration>& __t) {
 
 #if _LIBCPP_STD_VER >= 17
 template <class _ToDuration, class _Clock, class _Duration, enable_if_t<__is_duration<_ToDuration>::value, int> = 0>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR time_point<_Clock, _ToDuration>
-floor(const time_point<_Clock, _Duration>& __t) {
+inline _LIBCPP_HIDE_FROM_ABI constexpr time_point<_Clock, _ToDuration> floor(const time_point<_Clock, _Duration>& __t) {
   return time_point<_Clock, _ToDuration>{chrono::floor<_ToDuration>(__t.time_since_epoch())};
 }
 
 template <class _ToDuration, class _Clock, class _Duration, enable_if_t<__is_duration<_ToDuration>::value, int> = 0>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR time_point<_Clock, _ToDuration>
-ceil(const time_point<_Clock, _Duration>& __t) {
+inline _LIBCPP_HIDE_FROM_ABI constexpr time_point<_Clock, _ToDuration> ceil(const time_point<_Clock, _Duration>& __t) {
   return time_point<_Clock, _ToDuration>{chrono::ceil<_ToDuration>(__t.time_since_epoch())};
 }
 
 template <class _ToDuration, class _Clock, class _Duration, enable_if_t<__is_duration<_ToDuration>::value, int> = 0>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR time_point<_Clock, _ToDuration>
-round(const time_point<_Clock, _Duration>& __t) {
+inline _LIBCPP_HIDE_FROM_ABI constexpr time_point<_Clock, _ToDuration> round(const time_point<_Clock, _Duration>& __t) {
   return time_point<_Clock, _ToDuration>{chrono::round<_ToDuration>(__t.time_since_epoch())};
 }
 
 template <class _Rep, class _Period, enable_if_t<numeric_limits<_Rep>::is_signed, int> = 0>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration<_Rep, _Period> abs(duration<_Rep, _Period> __d) {
+inline _LIBCPP_HIDE_FROM_ABI constexpr duration<_Rep, _Period> abs(duration<_Rep, _Period> __d) {
   return __d >= __d.zero() ?
+__d : -__d; } #endif // _LIBCPP_STD_VER >= 17 @@ -180,9 +177,9 @@ operator<=>(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock // time_point operator+(time_point x, duration y); template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> - operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +inline _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> +operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Tr; return _Tr(__lhs.time_since_epoch() + __rhs); } @@ -190,18 +187,18 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 // time_point operator+(duration x, time_point y); template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - time_point<_Clock, typename common_type, _Duration2>::type> - operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { +inline _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, typename common_type, _Duration2>::type> +operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { return __rhs + __lhs; } // time_point operator-(time_point x, duration y); template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> - operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +inline _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> +operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Ret; return _Ret(__lhs.time_since_epoch() - __rhs); } diff --git a/lib/libcxx/include/__chrono/time_zone.h b/lib/libcxx/include/__chrono/time_zone.h new file mode 100644 index 0000000000..de11dac1ee --- /dev/null +++ b/lib/libcxx/include/__chrono/time_zone.h @@ -0,0 +1,182 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_TIME_ZONE_H +#define _LIBCPP___CHRONO_TIME_ZONE_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. 
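// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: the time_point
// hunks above only change spelling (_LIBCPP_CONSTEXPR written as constexpr,
// declarations reflowed); behaviour is unchanged. The semantic point the
// functions themselves encode: time_point_cast truncates toward zero while
// chrono::floor truncates toward negative infinity, which differs before the
// epoch.
#include <cassert>
#include <chrono>

int main() {
  using namespace std::chrono;
  sys_time<milliseconds> t{milliseconds{-1500}}; // 1.5s before the epoch
  assert(time_point_cast<seconds>(t).time_since_epoch() == seconds{-1});
  assert(floor<seconds>(t).time_since_epoch() == seconds{-2});
}
// ---------------------------------------------------------------------------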
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/calendar.h> +# include <__chrono/duration.h> +# include <__chrono/exception.h> +# include <__chrono/local_info.h> +# include <__chrono/sys_info.h> +# include <__chrono/system_clock.h> +# include <__compare/strong_order.h> +# include <__config> +# include <__memory/unique_ptr.h> +# include <__type_traits/common_type.h> +# include + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_PUSH_MACROS +# include <__undef_macros> + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +namespace chrono { + +enum class choose { earliest, latest }; + +class _LIBCPP_AVAILABILITY_TZDB time_zone { + _LIBCPP_HIDE_FROM_ABI time_zone() = default; + +public: + class __impl; // public so it can be used by make_unique. + + // The "constructor". + // + // The default constructor is private to avoid the constructor from being + // part of the ABI. Instead use an __ugly_named function as an ABI interface, + // since that gives us the ability to change it in the future. + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI static time_zone __create(unique_ptr<__impl>&& __p); + + _LIBCPP_EXPORTED_FROM_ABI ~time_zone(); + + _LIBCPP_HIDE_FROM_ABI time_zone(time_zone&&) = default; + _LIBCPP_HIDE_FROM_ABI time_zone& operator=(time_zone&&) = default; + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI string_view name() const noexcept { return __name(); } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_info get_info(const sys_time<_Duration>& __time) const { + return __get_info(chrono::time_point_cast(__time)); + } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI local_info get_info(const local_time<_Duration>& __time) const { + return __get_info(chrono::time_point_cast(__time)); + } + + // We don't apply nodiscard here since this function throws on many inputs, + // so it could be used as a validation. + template + _LIBCPP_HIDE_FROM_ABI sys_time> to_sys(const local_time<_Duration>& __time) const { + local_info __info = get_info(__time); + switch (__info.result) { + case local_info::unique: + return sys_time>{__time.time_since_epoch() - __info.first.offset}; + + case local_info::nonexistent: + chrono::__throw_nonexistent_local_time(__time, __info); + + case local_info::ambiguous: + chrono::__throw_ambiguous_local_time(__time, __info); + } + + // TODO TZDB The Standard does not specify anything in these cases. + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -1, "cannot convert the local time; it would be before the minimum system clock value"); + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -2, "cannot convert the local time; it would be after the maximum system clock value"); + + return {}; + } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_time> + to_sys(const local_time<_Duration>& __time, choose __z) const { + local_info __info = get_info(__time); + switch (__info.result) { + case local_info::unique: + case local_info::nonexistent: // first and second are the same + return sys_time>{__time.time_since_epoch() - __info.first.offset}; + + case local_info::ambiguous: + switch (__z) { + case choose::earliest: + return sys_time>{__time.time_since_epoch() - __info.first.offset}; + + case choose::latest: + return sys_time>{__time.time_since_epoch() - __info.second.offset}; + + // Note a value out of bounds is not specified. 
+ } + } + + // TODO TZDB The standard does not specify anything in these cases. + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -1, "cannot convert the local time; it would be before the minimum system clock value"); + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -2, "cannot convert the local time; it would be after the maximum system clock value"); + + return {}; + } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI local_time> + to_local(const sys_time<_Duration>& __time) const { + using _Dp = common_type_t<_Duration, seconds>; + + sys_info __info = get_info(__time); + + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.offset >= chrono::seconds{0} || __time.time_since_epoch() >= _Dp::min() - __info.offset, + "cannot convert the system time; it would be before the minimum local clock value"); + + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.offset <= chrono::seconds{0} || __time.time_since_epoch() <= _Dp::max() - __info.offset, + "cannot convert the system time; it would be after the maximum local clock value"); + + return local_time<_Dp>{__time.time_since_epoch() + __info.offset}; + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const __impl& __implementation() const noexcept { return *__impl_; } + +private: + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI string_view __name() const noexcept; + + [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI sys_info __get_info(sys_seconds __time) const; + [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI local_info __get_info(local_seconds __time) const; + + unique_ptr<__impl> __impl_; +}; + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline bool +operator==(const time_zone& __x, const time_zone& __y) noexcept { + return __x.name() == __y.name(); +} + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline strong_ordering +operator<=>(const time_zone& __x, const time_zone& __y) noexcept { + return __x.name() <=> __y.name(); +} + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) + // && !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_TIME_ZONE_H diff --git a/lib/libcxx/include/__chrono/time_zone_link.h b/lib/libcxx/include/__chrono/time_zone_link.h new file mode 100644 index 0000000000..b2d365c5fd --- /dev/null +++ b/lib/libcxx/include/__chrono/time_zone_link.h @@ -0,0 +1,79 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_TIME_ZONE_LINK_H +#define _LIBCPP___CHRONO_TIME_ZONE_LINK_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. 
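// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: unlike the
// throwing to_sys overload above, to_sys with a choose argument always
// yields a value; for an ambiguous local time it picks the earlier or later
// mapping. Zone and date are arbitrary examples (a European fall-back day).
#include <chrono>
#include <iostream>

int main() {
  using namespace std::chrono;
  const time_zone* tz = locate_zone("Europe/Amsterdam");
  // 02:30 occurs twice when clocks fall back from CEST to CET.
  local_time<minutes> lt = local_days{2024y / October / 27} + hours{2} + minutes{30};
  auto early = tz->to_sys(lt, choose::earliest); // the +02:00 mapping
  auto late  = tz->to_sys(lt, choose::latest);   // the +01:00 mapping
  std::cout << late - early << '\n';             // expected: "3600s"
}
// ---------------------------------------------------------------------------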
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#  include <__compare/strong_order.h>
+#  include <__config>
+#  include <__utility/private_constructor_tag.h>
+#  include <string>
+#  include <string_view>
+
+#  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#    pragma GCC system_header
+#  endif
+
+_LIBCPP_PUSH_MACROS
+#  include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#  if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
+      !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
+namespace chrono {
+
+class time_zone_link {
+public:
+  [[nodiscard]]
+  _LIBCPP_HIDE_FROM_ABI explicit time_zone_link(__private_constructor_tag, string_view __name, string_view __target)
+      : __name_{__name}, __target_{__target} {}
+
+  _LIBCPP_HIDE_FROM_ABI time_zone_link(time_zone_link&&) = default;
+  _LIBCPP_HIDE_FROM_ABI time_zone_link& operator=(time_zone_link&&) = default;
+
+  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI string_view name() const noexcept { return __name_; }
+  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI string_view target() const noexcept { return __target_; }
+
+private:
+  string __name_;
+  // TODO TZDB instead of the name we can store the pointer to a zone. These
+  // pointers are immutable. This makes it possible to directly return a
+  // pointer in the time_zone in the 'locate_zone' function.
+  string __target_;
+};
+
+[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline bool
+operator==(const time_zone_link& __x, const time_zone_link& __y) noexcept {
+  return __x.name() == __y.name();
+}
+
+[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline strong_ordering
+operator<=>(const time_zone_link& __x, const time_zone_link& __y) noexcept {
+  return __x.name() <=> __y.name();
+}
+
+} // namespace chrono
+
+#  endif //_LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#endif // _LIBCPP___CHRONO_TIME_ZONE_LINK_H
diff --git a/lib/libcxx/include/__chrono/tzdb.h b/lib/libcxx/include/__chrono/tzdb.h
index bd7b05d478..f731f8c318 100644
--- a/lib/libcxx/include/__chrono/tzdb.h
+++ b/lib/libcxx/include/__chrono/tzdb.h
@@ -14,14 +14,23 @@
 #include <version>
 // Enable the contents of the header only when libc++ was built with experimental features enabled.
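// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: a
// time_zone_link is just an alias (name) pointing at a canonical zone
// (target). Enumerating the aliases of one target; the target string is an
// arbitrary IANA example.
#include <chrono>
#include <iostream>

int main() {
  const auto& db = std::chrono::get_tzdb();
  for (const auto& link : db.links)
    if (link.target() == "Etc/UTC")
      std::cout << link.name() << " -> " << link.target() << '\n';
}
// ---------------------------------------------------------------------------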
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB)
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
 
+#  include <__algorithm/ranges_lower_bound.h>
+#  include <__chrono/leap_second.h>
+#  include <__chrono/time_zone.h>
+#  include <__chrono/time_zone_link.h>
+#  include <__config>
 #  include <string>
+#  include <vector>
 
 #  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
 #    pragma GCC system_header
 #  endif
 
+_LIBCPP_PUSH_MACROS
+#  include <__undef_macros>
+
 _LIBCPP_BEGIN_NAMESPACE_STD
 
 #  if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
@@ -29,8 +38,46 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 namespace chrono {
 
-struct _LIBCPP_AVAILABILITY_TZDB tzdb {
+struct tzdb {
   string version;
+  vector<time_zone> zones;
+  vector<time_zone_link> links;
+
+  vector<leap_second> leap_seconds;
+
+  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const time_zone* __locate_zone(string_view __name) const {
+    if (const time_zone* __result = __find_in_zone(__name))
+      return __result;
+
+    if (auto __it = ranges::lower_bound(links, __name, {}, &time_zone_link::name);
+        __it != links.end() && __it->name() == __name)
+      if (const time_zone* __result = __find_in_zone(__it->target()))
+        return __result;
+
+    return nullptr;
+  }
+
+  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const time_zone* locate_zone(string_view __name) const {
+    if (const time_zone* __result = __locate_zone(__name))
+      return __result;
+
+    std::__throw_runtime_error("tzdb: requested time zone not found");
+  }
+
+  [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI const time_zone* current_zone() const {
+    return __current_zone();
+  }
+
+private:
+  _LIBCPP_HIDE_FROM_ABI const time_zone* __find_in_zone(string_view __name) const noexcept {
+    if (auto __it = ranges::lower_bound(zones, __name, {}, &time_zone::name);
+        __it != zones.end() && __it->name() == __name)
+      return std::addressof(*__it);
+
+    return nullptr;
+  }
+
+  [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI const time_zone* __current_zone() const;
 };
 
 } // namespace chrono
@@ -40,6 +87,8 @@ struct _LIBCPP_AVAILABILITY_TZDB tzdb {
 
 _LIBCPP_END_NAMESPACE_STD
 
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB)
+_LIBCPP_POP_MACROS
+
+#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
 
 #endif // _LIBCPP___CHRONO_TZDB_H
diff --git a/lib/libcxx/include/__chrono/tzdb_list.h b/lib/libcxx/include/__chrono/tzdb_list.h
index 0494826c01..aeef4fe1ab 100644
--- a/lib/libcxx/include/__chrono/tzdb_list.h
+++ b/lib/libcxx/include/__chrono/tzdb_list.h
@@ -14,12 +14,13 @@
 #include <version>
 // Enable the contents of the header only when libc++ was built with experimental features enabled.
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB)
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
 
-#  include <__availability>
+#  include <__chrono/time_zone.h>
 #  include <__chrono/tzdb.h>
+#  include <__config>
+#  include <__fwd/string.h>
 #  include <forward_list>
-#  include <string>
 
 #  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
 #    pragma GCC system_header
@@ -32,9 +33,18 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 namespace chrono {
 
+// TODO TZDB
+// Libc++ recently switched to only export __ugly_names from the dylib.
+// Since the library is still experimental the functions in this header
+// should be adapted to this new style. The other tzdb headers should be
+// evaluated too.
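// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: locate_zone
// above first searches zones, then follows one link to its target, so an
// alias and its canonical zone yield the same time_zone object. The names
// are ordinary IANA entries used here as examples.
#include <cassert>
#include <chrono>

int main() {
  using namespace std::chrono;
  const time_zone* alias = locate_zone("US/Pacific"); // a link in the IANA db
  const time_zone* canon = locate_zone("America/Los_Angeles");
  assert(alias == canon); // link resolution returns the canonical zone
}
// ---------------------------------------------------------------------------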
+ class _LIBCPP_AVAILABILITY_TZDB tzdb_list { public: - _LIBCPP_EXPORTED_FROM_ABI explicit tzdb_list(tzdb&& __tzdb); + class __impl; // public to allow construction in dylib + _LIBCPP_HIDE_FROM_ABI explicit tzdb_list(__impl* __p) : __impl_(__p) { + _LIBCPP_ASSERT_NON_NULL(__impl_ != nullptr, "initialized time_zone without a valid pimpl object"); + } _LIBCPP_EXPORTED_FROM_ABI ~tzdb_list(); tzdb_list(const tzdb_list&) = delete; @@ -42,32 +52,49 @@ public: using const_iterator = forward_list::const_iterator; - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const tzdb& front() const noexcept; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const tzdb& front() const noexcept { return __front(); } - _LIBCPP_EXPORTED_FROM_ABI const_iterator erase_after(const_iterator __p); + _LIBCPP_HIDE_FROM_ABI const_iterator erase_after(const_iterator __p) { return __erase_after(__p); } - _LIBCPP_EXPORTED_FROM_ABI tzdb& __emplace_front(tzdb&& __tzdb); + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator begin() const noexcept { return __begin(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator end() const noexcept { return __end(); } - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator begin() const noexcept; - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator end() const noexcept; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const noexcept { return __cbegin(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator cend() const noexcept { return __cend(); } - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator cbegin() const noexcept; - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator cend() const noexcept; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI __impl& __implementation() { return *__impl_; } private: - class __impl; + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const tzdb& __front() const noexcept; + + _LIBCPP_EXPORTED_FROM_ABI const_iterator __erase_after(const_iterator __p); + + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __begin() const noexcept; + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __end() const noexcept; + + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __cbegin() const noexcept; + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __cend() const noexcept; + __impl* __impl_; }; -_LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI tzdb_list& get_tzdb_list(); +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI tzdb_list& get_tzdb_list(); -_LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const tzdb& get_tzdb() { +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const tzdb& get_tzdb() { return get_tzdb_list().front(); } +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const time_zone* locate_zone(string_view __name) { + return get_tzdb().locate_zone(__name); +} + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const time_zone* current_zone() { + return get_tzdb().current_zone(); +} + _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI const tzdb& reload_tzdb(); -_LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI string remote_version(); +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI string remote_version(); } // namespace chrono @@ -76,6 +103,6 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI string _LIBCPP_END_NAMESPACE_STD -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB) +#endif // 
!defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) #endif // _LIBCPP___CHRONO_TZDB_LIST_H diff --git a/lib/libcxx/include/__chrono/weekday.h b/lib/libcxx/include/__chrono/weekday.h index 5a7dedc6e3..86c780cc71 100644 --- a/lib/libcxx/include/__chrono/weekday.h +++ b/lib/libcxx/include/__chrono/weekday.h @@ -79,6 +79,8 @@ _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator==(const weekday& __lhs, con return __lhs.c_encoding() == __rhs.c_encoding(); } +// TODO(LLVM 20): Remove the escape hatch +# ifdef _LIBCPP_ENABLE_REMOVED_WEEKDAY_RELATIONAL_OPERATORS _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator<(const weekday& __lhs, const weekday& __rhs) noexcept { return __lhs.c_encoding() < __rhs.c_encoding(); } @@ -94,6 +96,7 @@ _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator<=(const weekday& __lhs, con _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator>=(const weekday& __lhs, const weekday& __rhs) noexcept { return !(__lhs < __rhs); } +# endif // _LIBCPP_ENABLE_REMOVED_WEEKDAY_RELATIONAL_OPERATORS _LIBCPP_HIDE_FROM_ABI inline constexpr weekday operator+(const weekday& __lhs, const days& __rhs) noexcept { auto const __mu = static_cast(__lhs.c_encoding()) + __rhs.count(); diff --git a/lib/libcxx/include/__chrono/year_month_day.h b/lib/libcxx/include/__chrono/year_month_day.h index 75884f3654..b06c0be03e 100644 --- a/lib/libcxx/include/__chrono/year_month_day.h +++ b/lib/libcxx/include/__chrono/year_month_day.h @@ -239,33 +239,11 @@ operator==(const year_month_day_last& __lhs, const year_month_day_last& __rhs) n return __lhs.year() == __rhs.year() && __lhs.month_day_last() == __rhs.month_day_last(); } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator!=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return !(__lhs == __rhs); -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator<(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - if (__lhs.year() < __rhs.year()) - return true; - if (__lhs.year() > __rhs.year()) - return false; - return __lhs.month_day_last() < __rhs.month_day_last(); -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator>(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return __rhs < __lhs; -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator<=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return !(__rhs < __lhs); -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator>=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return !(__lhs < __rhs); +_LIBCPP_HIDE_FROM_ABI inline constexpr strong_ordering +operator<=>(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { + if (auto __c = __lhs.year() <=> __rhs.year(); __c != 0) + return __c; + return __lhs.month_day_last() <=> __rhs.month_day_last(); } _LIBCPP_HIDE_FROM_ABI inline constexpr year_month_day_last operator/(const year_month& __lhs, last_spec) noexcept { diff --git a/lib/libcxx/include/__chrono/zoned_time.h b/lib/libcxx/include/__chrono/zoned_time.h new file mode 100644 index 0000000000..8cfa212264 --- /dev/null +++ b/lib/libcxx/include/__chrono/zoned_time.h @@ -0,0 +1,227 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html
+
+#ifndef _LIBCPP___CHRONO_ZONED_TIME_H
+#define _LIBCPP___CHRONO_ZONED_TIME_H
+
+#include <version>
+// Enable the contents of the header only when libc++ was built with experimental features enabled.
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB)
+
+#  include <__chrono/calendar.h>
+#  include <__chrono/duration.h>
+#  include <__chrono/sys_info.h>
+#  include <__chrono/system_clock.h>
+#  include <__chrono/time_zone.h>
+#  include <__chrono/tzdb_list.h>
+#  include <__config>
+#  include <__fwd/string_view.h>
+#  include <__type_traits/common_type.h>
+#  include <__type_traits/conditional.h>
+#  include <__type_traits/remove_cvref.h>
+#  include <__utility/move.h>
+
+#  if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#    pragma GCC system_header
+#  endif
+
+_LIBCPP_PUSH_MACROS
+#  include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#  if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
+      !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
+namespace chrono {
+
+template <class _TimeZonePtr>
+struct zoned_traits {};
+
+template <>
+struct zoned_traits<const time_zone*> {
+  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static const time_zone* default_zone() { return chrono::locate_zone("UTC"); }
+  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static const time_zone* locate_zone(string_view __name) {
+    return chrono::locate_zone(__name);
+  }
+};
+
+template <class _Duration, class _TimeZonePtr = const time_zone*>
+class zoned_time {
+  // [time.zone.zonedtime.ctor]/2
+  static_assert(__is_duration<_Duration>::value,
+                "the program is ill-formed since _Duration is not a specialization of std::chrono::duration");
+
+  // The wording uses the constraints like
+  //   constructible_from<zoned_time, decltype(__traits::locate_zone(string_view{}))>
+  // Using these constraints in the code causes the compiler to give an
+  // error that the constraint depends on itself. To avoid that issue use
+  // the fact it is possible to create this object from a _TimeZonePtr.
+ using __traits = zoned_traits<_TimeZonePtr>; + +public: + using duration = common_type_t<_Duration, seconds>; + + _LIBCPP_HIDE_FROM_ABI zoned_time() + requires requires { __traits::default_zone(); } + : __zone_{__traits::default_zone()}, __tp_{} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(const zoned_time&) = default; + _LIBCPP_HIDE_FROM_ABI zoned_time& operator=(const zoned_time&) = default; + + _LIBCPP_HIDE_FROM_ABI zoned_time(const sys_time<_Duration>& __tp) + requires requires { __traits::default_zone(); } + : __zone_{__traits::default_zone()}, __tp_{__tp} {} + + _LIBCPP_HIDE_FROM_ABI explicit zoned_time(_TimeZonePtr __zone) : __zone_{std::move(__zone)}, __tp_{} {} + + _LIBCPP_HIDE_FROM_ABI explicit zoned_time(string_view __name) + requires(requires { __traits::locate_zone(string_view{}); } && + constructible_from<_TimeZonePtr, decltype(__traits::locate_zone(string_view{}))>) + : __zone_{__traits::locate_zone(__name)}, __tp_{} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(const zoned_time<_Duration2, _TimeZonePtr>& __zt) + requires is_convertible_v, sys_time<_Duration>> + : __zone_{__zt.get_time_zone()}, __tp_{__zt.get_sys_time()} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const sys_time<_Duration>& __tp) + : __zone_{std::move(__zone)}, __tp_{__tp} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const sys_time<_Duration>& __tp) + requires requires { _TimeZonePtr{__traits::locate_zone(string_view{})}; } + : zoned_time{__traits::locate_zone(__name), __tp} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const local_time<_Duration>& __tp) + requires(is_convertible_v() -> to_sys(local_time<_Duration>{})), + sys_time>) + : __zone_{std::move(__zone)}, __tp_{__zone_->to_sys(__tp)} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const local_time<_Duration>& __tp) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v() -> to_sys(local_time<_Duration>{})), + sys_time>) + : zoned_time{__traits::locate_zone(__name), __tp} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const local_time<_Duration>& __tp, choose __c) + requires(is_convertible_v< + decltype(std::declval<_TimeZonePtr&>() -> to_sys(local_time<_Duration>{}, choose::earliest)), + sys_time>) + : __zone_{std::move(__zone)}, __tp_{__zone_->to_sys(__tp, __c)} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const local_time<_Duration>& __tp, choose __c) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v() -> to_sys(local_time<_Duration>{}, choose::earliest)), + sys_time>) + : zoned_time{__traits::locate_zone(__name), __tp, __c} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const zoned_time<_Duration2, _TimeZonePtr2>& __zt) + requires is_convertible_v, sys_time<_Duration>> + : __zone_{std::move(__zone)}, __tp_{__zt.get_sys_time()} {} + + // per wording choose has no effect + template + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const zoned_time<_Duration2, _TimeZonePtr2>& __zt, choose) + requires is_convertible_v, sys_time<_Duration>> + : __zone_{std::move(__zone)}, __tp_{__zt.get_sys_time()} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const zoned_time<_Duration2, _TimeZonePtr2>& __zt) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v, sys_time<_Duration>>) + : zoned_time{__traits::locate_zone(__name), __zt} {} + + template + _LIBCPP_HIDE_FROM_ABI 
zoned_time(string_view __name, const zoned_time<_Duration2, _TimeZonePtr2>& __zt, choose __c) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v, sys_time<_Duration>>) + : zoned_time{__traits::locate_zone(__name), __zt, __c} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time& operator=(const sys_time<_Duration>& __tp) { + __tp_ = __tp; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI zoned_time& operator=(const local_time<_Duration>& __tp) { + // TODO TZDB This seems wrong. + // Assigning a non-existent or ambiguous time will throw and not satisfy + // the post condition. This seems quite odd; I constructed an object with + // choose::earliest and that choice is not respected. + // what did LEWG do with this. + // MSVC STL and libstdc++ behave the same + __tp_ = __zone_->to_sys(__tp); + return *this; + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI operator sys_time() const { return get_sys_time(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI explicit operator local_time() const { return get_local_time(); } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _TimeZonePtr get_time_zone() const { return __zone_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI local_time get_local_time() const { return __zone_->to_local(__tp_); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_time get_sys_time() const { return __tp_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_info get_info() const { return __zone_->get_info(__tp_); } + +private: + _TimeZonePtr __zone_; + sys_time __tp_; +}; + +zoned_time() -> zoned_time; + +template +zoned_time(sys_time<_Duration>) -> zoned_time>; + +template +using __time_zone_representation = + conditional_t, + const time_zone*, + remove_cvref_t<_TimeZonePtrOrName>>; + +template +zoned_time(_TimeZonePtrOrName&&) -> zoned_time>; + +template +zoned_time(_TimeZonePtrOrName&&, sys_time<_Duration>) + -> zoned_time, __time_zone_representation<_TimeZonePtrOrName>>; + +template +zoned_time(_TimeZonePtrOrName&&, local_time<_Duration>, choose = choose::earliest) + -> zoned_time, __time_zone_representation<_TimeZonePtrOrName>>; + +template +zoned_time(_TimeZonePtrOrName&&, zoned_time<_Duration, TimeZonePtr2>, choose = choose::earliest) + -> zoned_time, __time_zone_representation<_TimeZonePtrOrName>>; + +using zoned_seconds = zoned_time; + +template +_LIBCPP_HIDE_FROM_ABI bool +operator==(const zoned_time<_Duration1, _TimeZonePtr>& __lhs, const zoned_time<_Duration2, _TimeZonePtr>& __rhs) { + return __lhs.get_time_zone() == __rhs.get_time_zone() && __lhs.get_sys_time() == __rhs.get_sys_time(); +} + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) + // && !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_ZONED_TIME_H diff --git a/lib/libcxx/include/__compare/partial_order.h b/lib/libcxx/include/__compare/partial_order.h index f3ed4900fb..1d2fae63e5 100644 --- a/lib/libcxx/include/__compare/partial_order.h +++ b/lib/libcxx/include/__compare/partial_order.h @@ -28,6 +28,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [cmp.alg] namespace __partial_order { +void partial_order() = delete; + struct __fn { // NOLINTBEGIN(libcpp-robust-against-adl) partial_order should use ADL, but only here template diff --git a/lib/libcxx/include/__compare/strong_order.h b/lib/libcxx/include/__compare/strong_order.h index 5f6ade5aef..8c363b5638 100644 --- a/lib/libcxx/include/__compare/strong_order.h 
+++ b/lib/libcxx/include/__compare/strong_order.h @@ -13,11 +13,14 @@ #include <__compare/compare_three_way.h> #include <__compare/ordering.h> #include <__config> +#include <__math/exponential_functions.h> +#include <__math/traits.h> #include <__type_traits/conditional.h> #include <__type_traits/decay.h> +#include <__type_traits/is_floating_point.h> +#include <__type_traits/is_same.h> #include <__utility/forward.h> #include <__utility/priority_tag.h> -#include #include #include @@ -34,6 +37,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [cmp.alg] namespace __strong_order { +void strong_order() = delete; + struct __fn { // NOLINTBEGIN(libcpp-robust-against-adl) strong_order should use ADL, but only here template @@ -66,27 +71,27 @@ struct __fn { return strong_ordering::greater; } else if (__t == __u) { if constexpr (numeric_limits<_Dp>::radix == 2) { - return std::signbit(__u) <=> std::signbit(__t); + return __math::signbit(__u) <=> __math::signbit(__t); } else { // This is bullet 3 of the IEEE754 algorithm, relevant // only for decimal floating-point; // see https://stackoverflow.com/questions/69068075/ - if (__t == 0 || std::isinf(__t)) { - return std::signbit(__u) <=> std::signbit(__t); + if (__t == 0 || __math::isinf(__t)) { + return __math::signbit(__u) <=> __math::signbit(__t); } else { int __texp, __uexp; - (void)std::frexp(__t, &__texp); - (void)std::frexp(__u, &__uexp); + (void)__math::frexp(__t, &__texp); + (void)__math::frexp(__u, &__uexp); return (__t < 0) ? (__texp <=> __uexp) : (__uexp <=> __texp); } } } else { // They're unordered, so one of them must be a NAN. // The order is -QNAN, -SNAN, numbers, +SNAN, +QNAN. - bool __t_is_nan = std::isnan(__t); - bool __u_is_nan = std::isnan(__u); - bool __t_is_negative = std::signbit(__t); - bool __u_is_negative = std::signbit(__u); + bool __t_is_nan = __math::isnan(__t); + bool __u_is_nan = __math::isnan(__u); + bool __t_is_negative = __math::signbit(__t); + bool __u_is_negative = __math::signbit(__u); using _IntType = conditional_t< sizeof(__t) == sizeof(int32_t), int32_t, diff --git a/lib/libcxx/include/__compare/synth_three_way.h b/lib/libcxx/include/__compare/synth_three_way.h index 6420d1362d..e48ce49799 100644 --- a/lib/libcxx/include/__compare/synth_three_way.h +++ b/lib/libcxx/include/__compare/synth_three_way.h @@ -25,12 +25,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [expos.only.func] -// TODO MODULES restore the lamba to match the Standard. 
-// See https://github.com/llvm/llvm-project/issues/57222 -//_LIBCPP_HIDE_FROM_ABI inline constexpr auto __synth_three_way = -// [](const _Tp& __t, const _Up& __u) -template -_LIBCPP_HIDE_FROM_ABI constexpr auto __synth_three_way(const _Tp& __t, const _Up& __u) +_LIBCPP_HIDE_FROM_ABI inline constexpr auto __synth_three_way = [](const _Tp& __t, const _Up& __u) requires requires { { __t < __u } -> __boolean_testable; { __u < __t } -> __boolean_testable; @@ -45,7 +40,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr auto __synth_three_way(const _Tp& __t, const _Up return weak_ordering::greater; return weak_ordering::equivalent; } -} +}; template using __synth_three_way_result = decltype(std::__synth_three_way(std::declval<_Tp&>(), std::declval<_Up&>())); diff --git a/lib/libcxx/include/__compare/weak_order.h b/lib/libcxx/include/__compare/weak_order.h index 9f719eb64b..1a3e85feb2 100644 --- a/lib/libcxx/include/__compare/weak_order.h +++ b/lib/libcxx/include/__compare/weak_order.h @@ -13,10 +13,12 @@ #include <__compare/ordering.h> #include <__compare/strong_order.h> #include <__config> +#include <__math/traits.h> #include <__type_traits/decay.h> +#include <__type_traits/is_floating_point.h> +#include <__type_traits/is_same.h> #include <__utility/forward.h> #include <__utility/priority_tag.h> -#include #ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER # pragma GCC system_header @@ -28,6 +30,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [cmp.alg] namespace __weak_order { +void weak_order() = delete; + struct __fn { // NOLINTBEGIN(libcpp-robust-against-adl) weak_order should use ADL, but only here template @@ -51,10 +55,10 @@ struct __fn { return weak_ordering::greater; } else { // Otherwise, at least one of them is a NaN. - bool __t_is_nan = std::isnan(__t); - bool __u_is_nan = std::isnan(__u); - bool __t_is_negative = std::signbit(__t); - bool __u_is_negative = std::signbit(__u); + bool __t_is_nan = __math::isnan(__t); + bool __u_is_nan = __math::isnan(__u); + bool __t_is_negative = __math::signbit(__t); + bool __u_is_negative = __math::signbit(__u); if (__t_is_nan && __u_is_nan) { return (__u_is_negative <=> __t_is_negative); } else if (__t_is_nan) { diff --git a/lib/libcxx/include/__concepts/class_or_enum.h b/lib/libcxx/include/__concepts/class_or_enum.h index c1b4a8c258..2739e31e14 100644 --- a/lib/libcxx/include/__concepts/class_or_enum.h +++ b/lib/libcxx/include/__concepts/class_or_enum.h @@ -28,11 +28,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD template concept __class_or_enum = is_class_v<_Tp> || is_union_v<_Tp> || is_enum_v<_Tp>; -// Work around Clang bug https://llvm.org/PR52970 -// TODO: remove this workaround once libc++ no longer has to support Clang 13 (it was fixed in Clang 14). 
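// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the patch: the switch to
// the __math:: helpers above only changes which internal functions classify
// the values; the observable ordering is unchanged. For floating-point,
// std::strong_order is the IEEE-754 total order (-QNAN < -SNAN < numbers <
// +SNAN < +QNAN) and std::weak_order collapses all NaNs of one sign into a
// single equivalence class.
#include <cassert>
#include <compare>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(std::strong_order(-0.0, 0.0) == std::strong_ordering::less);   // -0 before +0
  assert(std::weak_order(-0.0, 0.0) == std::weak_ordering::equivalent); // -0 equivalent to +0
  assert(std::weak_order(nan, 1.0) == std::weak_ordering::greater);     // +NaN after numbers
  assert(std::weak_order(-nan, 1.0) == std::weak_ordering::less);       // -NaN before numbers
}
// ---------------------------------------------------------------------------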
-template -concept __workaround_52970 = is_class_v<__remove_cvref_t<_Tp>> || is_union_v<__remove_cvref_t<_Tp>>; - #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__concepts/swappable.h b/lib/libcxx/include/__concepts/swappable.h index 1337dc49d7..d339488a08 100644 --- a/lib/libcxx/include/__concepts/swappable.h +++ b/lib/libcxx/include/__concepts/swappable.h @@ -15,8 +15,8 @@ #include <__concepts/constructible.h> #include <__config> #include <__type_traits/extent.h> -#include <__type_traits/is_nothrow_move_assignable.h> -#include <__type_traits/is_nothrow_move_constructible.h> +#include <__type_traits/is_nothrow_assignable.h> +#include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/remove_cvref.h> #include <__utility/exchange.h> #include <__utility/forward.h> diff --git a/lib/libcxx/include/__condition_variable/condition_variable.h b/lib/libcxx/include/__condition_variable/condition_variable.h index 4d8e590e29..de35aaca10 100644 --- a/lib/libcxx/include/__condition_variable/condition_variable.h +++ b/lib/libcxx/include/__condition_variable/condition_variable.h @@ -9,6 +9,7 @@ #ifndef _LIBCPP___CONDITION_VARIABLE_CONDITION_VARIABLE_H #define _LIBCPP___CONDITION_VARIABLE_CONDITION_VARIABLE_H +#include <__chrono/duration.h> #include <__chrono/steady_clock.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> @@ -16,7 +17,7 @@ #include <__mutex/mutex.h> #include <__mutex/unique_lock.h> #include <__system_error/system_error.h> -#include <__threading_support> +#include <__thread/support.h> #include <__type_traits/enable_if.h> #include <__type_traits/is_floating_point.h> #include <__utility/move.h> diff --git a/lib/libcxx/include/__config b/lib/libcxx/include/__config index 630861acaf..8165dbc549 100644 --- a/lib/libcxx/include/__config +++ b/lib/libcxx/include/__config @@ -11,58 +11,23 @@ #define _LIBCPP___CONFIG /* zig patch: instead of including __config_site, zig adds -D flags when compiling */ +#include <__configuration/abi.h> +#include <__configuration/availability.h> +#include <__configuration/compiler.h> +#include <__configuration/platform.h> #ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER # pragma GCC system_header #endif -#if defined(_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) -# pragma clang deprecated( \ - _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES, \ - "_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES is deprecated in LLVM 18 and will be removed in LLVM 19") -#endif -#if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) -# pragma clang deprecated( \ - _LIBCPP_ENABLE_CXX20_REMOVED_FEATURES, \ - "_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES is deprecated in LLVM 18 and will be removed in LLVM 19") -#endif - -#if defined(__apple_build_version__) -// Given AppleClang XX.Y.Z, _LIBCPP_APPLE_CLANG_VER is XXYZ (e.g. 
AppleClang 14.0.3 => 1403) -# define _LIBCPP_COMPILER_CLANG_BASED -# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000) -#elif defined(__clang__) -# define _LIBCPP_COMPILER_CLANG_BASED -# define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__) -#elif defined(__GNUC__) -# define _LIBCPP_COMPILER_GCC -# define _LIBCPP_GCC_VER (__GNUC__ * 100 + __GNUC_MINOR__) -#endif - #ifdef __cplusplus -// Warn if a compiler version is used that is not supported anymore -// LLVM RELEASE Update the minimum compiler versions -# if defined(_LIBCPP_CLANG_VER) -# if _LIBCPP_CLANG_VER < 1600 -# warning "Libc++ only supports Clang 16 and later" -# endif -# elif defined(_LIBCPP_APPLE_CLANG_VER) -# if _LIBCPP_APPLE_CLANG_VER < 1500 -# warning "Libc++ only supports AppleClang 15 and later" -# endif -# elif defined(_LIBCPP_GCC_VER) -# if _LIBCPP_GCC_VER < 1300 -# warning "Libc++ only supports GCC 13 and later" -# endif -# endif - // The attributes supported by clang are documented at https://clang.llvm.org/docs/AttributeReference.html // _LIBCPP_VERSION represents the version of libc++, which matches the version of LLVM. // Given a LLVM release LLVM XX.YY.ZZ (e.g. LLVM 17.0.1 == 17.00.01), _LIBCPP_VERSION is // defined to XXYYZZ. -# define _LIBCPP_VERSION 180100 +# define _LIBCPP_VERSION 190100 # define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y # define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y) @@ -71,170 +36,12 @@ # define _LIBCPP_FREESTANDING # endif -// NOLINTBEGIN(libcpp-cpp-version-check) -# ifndef _LIBCPP_STD_VER -# if __cplusplus <= 201103L -# define _LIBCPP_STD_VER 11 -# elif __cplusplus <= 201402L -# define _LIBCPP_STD_VER 14 -# elif __cplusplus <= 201703L -# define _LIBCPP_STD_VER 17 -# elif __cplusplus <= 202002L -# define _LIBCPP_STD_VER 20 -# elif __cplusplus <= 202302L -# define _LIBCPP_STD_VER 23 -# else -// Expected release year of the next C++ standard -# define _LIBCPP_STD_VER 26 -# endif -# endif // _LIBCPP_STD_VER -// NOLINTEND(libcpp-cpp-version-check) - -# if defined(__ELF__) -# define _LIBCPP_OBJECT_FORMAT_ELF 1 -# elif defined(__MACH__) -# define _LIBCPP_OBJECT_FORMAT_MACHO 1 -# elif defined(_WIN32) -# define _LIBCPP_OBJECT_FORMAT_COFF 1 -# elif defined(__wasm__) -# define _LIBCPP_OBJECT_FORMAT_WASM 1 -# elif defined(_AIX) -# define _LIBCPP_OBJECT_FORMAT_XCOFF 1 -# else -// ... add new file formats here ... -# endif - -// ABI { - -# if _LIBCPP_ABI_VERSION >= 2 -// Change short string representation so that string data starts at offset 0, -// improving its alignment in some cases. -# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT -// Fix deque iterator type in order to support incomplete types. -# define _LIBCPP_ABI_INCOMPLETE_TYPES_IN_DEQUE -// Fix undefined behavior in how std::list stores its linked nodes. -# define _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB -// Fix undefined behavior in how __tree stores its end and parent nodes. -# define _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB -// Fix undefined behavior in how __hash_table stores its pointer types. -# define _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB -# define _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB -# define _LIBCPP_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE -// Define a key function for `bad_function_call` in the library, to centralize -// its vtable and typeinfo to libc++ rather than having all other libraries -// using that class define their own copies. 
-# define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION -// Override the default return value of exception::what() for -// bad_function_call::what() with a string that is specific to -// bad_function_call (see http://wg21.link/LWG2233). This is an ABI break -// because it changes the vtable layout of bad_function_call. -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_GOOD_WHAT_MESSAGE -// Enable optimized version of __do_get_(un)signed which avoids redundant copies. -# define _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET -// Give reverse_iterator one data member of type T, not two. -// Also, in C++17 and later, don't derive iterator types from std::iterator. -# define _LIBCPP_ABI_NO_ITERATOR_BASES -// Use the smallest possible integer type to represent the index of the variant. -// Previously libc++ used "unsigned int" exclusively. -# define _LIBCPP_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION -// Unstable attempt to provide a more optimized std::function -# define _LIBCPP_ABI_OPTIMIZED_FUNCTION -// All the regex constants must be distinct and nonzero. -# define _LIBCPP_ABI_REGEX_CONSTANTS_NONZERO -// Re-worked external template instantiations for std::string with a focus on -// performance and fast-path inlining. -# define _LIBCPP_ABI_STRING_OPTIMIZED_EXTERNAL_INSTANTIATION -// Enable clang::trivial_abi on std::unique_ptr. -# define _LIBCPP_ABI_ENABLE_UNIQUE_PTR_TRIVIAL_ABI -// Enable clang::trivial_abi on std::shared_ptr and std::weak_ptr -# define _LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI -// std::random_device holds some state when it uses an implementation that gets -// entropy from a file (see _LIBCPP_USING_DEV_RANDOM). When switching from this -// implementation to another one on a platform that has already shipped -// std::random_device, one needs to retain the same object layout to remain ABI -// compatible. This switch removes these workarounds for platforms that don't care -// about ABI compatibility. -# define _LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT -// Don't export the legacy __basic_string_common class and its methods from the built library. -# define _LIBCPP_ABI_DO_NOT_EXPORT_BASIC_STRING_COMMON -// Don't export the legacy __vector_base_common class and its methods from the built library. -# define _LIBCPP_ABI_DO_NOT_EXPORT_VECTOR_BASE_COMMON -// According to the Standard, `bitset::operator[] const` returns bool -# define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL -// Fix the implementation of CityHash used for std::hash. -// This is an ABI break because `std::hash` will return a different result, -// which means that hashing the same object in translation units built against -// different versions of libc++ can return inconsistent results. This is especially -// tricky since std::hash is used in the implementation of unordered containers. -// -// The incorrect implementation of CityHash has the problem that it drops some -// bits on the floor. -# define _LIBCPP_ABI_FIX_CITYHASH_IMPLEMENTATION -// Remove the base 10 implementation of std::to_chars from the dylib. -// The implementation moved to the header, but we still export the symbols from -// the dylib for backwards compatibility. -# define _LIBCPP_ABI_DO_NOT_EXPORT_TO_CHARS_BASE_10 -# elif _LIBCPP_ABI_VERSION == 1 -# if !(defined(_LIBCPP_OBJECT_FORMAT_COFF) || defined(_LIBCPP_OBJECT_FORMAT_XCOFF)) -// Enable compiling copies of now inline methods into the dylib to support -// applications compiled against older libraries. 
This is unnecessary with -// COFF dllexport semantics, since dllexport forces a non-inline definition -// of inline functions to be emitted anyway. Our own non-inline copy would -// conflict with the dllexport-emitted copy, so we disable it. For XCOFF, -// the linker will take issue with the symbols in the shared object if the -// weak inline methods get visibility (such as from -fvisibility-inlines-hidden), -// so disable it. -# define _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS -# endif -// Feature macros for disabling pre ABI v1 features. All of these options -// are deprecated. -# if defined(__FreeBSD__) && __FreeBSD__ < 14 -# define _LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR -# endif -// For XCOFF linkers, we have problems if we see a weak hidden version of a symbol -// in user code (like you get with -fvisibility-inlines-hidden) and then a strong def -// in the library, so we need to always rely on the library version. -# if defined(_AIX) -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION -# endif -# endif - -# if defined(_LIBCPP_BUILDING_LIBRARY) || _LIBCPP_ABI_VERSION >= 2 -// Define a key function for `bad_function_call` in the library, to centralize -// its vtable and typeinfo to libc++ rather than having all other libraries -// using that class define their own copies. -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION -# endif - -// We had some bugs where we use [[no_unique_address]] together with construct_at, -// which causes UB as the call on construct_at could write to overlapping subobjects -// -// https://github.com/llvm/llvm-project/issues/70506 -// https://github.com/llvm/llvm-project/issues/70494 -// -// To fix the bug we had to change the ABI of some classes to remove [[no_unique_address]] under certain conditions. -// The macro below is used for all classes whose ABI have changed as part of fixing these bugs. -# define _LIBCPP_ABI_LLVM18_NO_UNIQUE_ADDRESS __attribute__((__abi_tag__("llvm18_nua"))) - -// Changes the iterator type of select containers (see below) to a bounded iterator that keeps track of whether it's -// within the bounds of the original container and asserts it on every dereference. -// -// ABI impact: changes the iterator type of the relevant containers. -// -// Supported containers: -// - `span`; -// - `string_view`; -// - `array`. -// #define _LIBCPP_ABI_BOUNDED_ITERATORS - -// } ABI - // HARDENING { -// TODO(hardening): deprecate this in LLVM 19. // This is for backward compatibility -- make enabling `_LIBCPP_ENABLE_ASSERTIONS` (which predates hardening modes) -// equivalent to setting the extensive mode. +// equivalent to setting the extensive mode. This is deprecated and will be removed in LLVM 20. # ifdef _LIBCPP_ENABLE_ASSERTIONS +# warning "_LIBCPP_ENABLE_ASSERTIONS is deprecated, please use _LIBCPP_HARDENING_MODE instead" # if _LIBCPP_ENABLE_ASSERTIONS != 0 && _LIBCPP_ENABLE_ASSERTIONS != 1 # error "_LIBCPP_ENABLE_ASSERTIONS must be set to 0 or 1" # endif @@ -325,6 +132,12 @@ // clang-format on # ifndef _LIBCPP_HARDENING_MODE + +# ifndef _LIBCPP_HARDENING_MODE_DEFAULT +# error _LIBCPP_HARDENING_MODE_DEFAULT is not defined. This definition should be set at configuration time in the \ +`__config_site` header, please make sure your installation of libc++ is not broken. +# endif + # define _LIBCPP_HARDENING_MODE _LIBCPP_HARDENING_MODE_DEFAULT # endif @@ -339,87 +152,6 @@ _LIBCPP_HARDENING_MODE_EXTENSIVE, \ _LIBCPP_HARDENING_MODE_DEBUG # endif -// clang-format off -// Fast hardening mode checks. 
- -# if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST - -// Enabled checks. -# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) -// Disabled checks. -// On most modern platforms, dereferencing a null pointer does not lead to an actual memory access. -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) -// Overlapping ranges will make algorithms produce incorrect results but don't directly lead to a security -// vulnerability. -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) - -// Extensive hardening mode checks. - -# elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE - -// Enabled checks. -# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) -// Disabled checks. -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) - -// Debug hardening mode checks. - -# elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG - -// All checks enabled. 
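In user terms, these mode tables decide whether each check category is a real assertion or merely an _LIBCPP_ASSUME. For example, under the fast mode an out-of-bounds element access becomes a guaranteed trap instead of undefined behavior. A sketch, with one plausible compile line in the comment:

// hardening_demo.cpp -- build with something like:
//   clang++ -std=c++23 -stdlib=libc++ \
//     -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_FAST hardening_demo.cpp
#include <vector>

int main() {
  std::vector<int> v(3);
  // Falls under _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS: a real assertion in the
  // fast, extensive and debug modes; plain UB when hardening is disabled.
  return v[5];
}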
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) - -// Disable all checks if hardening is not enabled. - -# else - -// All checks disabled. -# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) - -# endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST -// clang-format on - // } HARDENING # define _LIBCPP_TOSTRING2(x) #x @@ -430,30 +162,15 @@ _LIBCPP_HARDENING_MODE_DEBUG # define _LIBCPP_CXX03_LANG # endif -# ifndef __has_attribute -# define __has_attribute(__x) 0 -# endif - -# ifndef __has_builtin -# define __has_builtin(__x) 0 -# endif - -# ifndef __has_extension -# define __has_extension(__x) 0 -# endif - -# ifndef __has_feature -# define __has_feature(__x) 0 -# endif - -# ifndef __has_cpp_attribute -# define __has_cpp_attribute(__x) 0 -# endif - # ifndef __has_constexpr_builtin # define __has_constexpr_builtin(x) 0 # endif +// This checks whether a Clang module is built +# ifndef __building_module +# define __building_module(...) 0 +# endif + // '__is_identifier' returns '0' if '__x' is a reserved identifier provided by // the compiler and '1' otherwise. # ifndef __is_identifier @@ -466,8 +183,8 @@ _LIBCPP_HARDENING_MODE_DEBUG # define __has_keyword(__x) !(__is_identifier(__x)) -# ifndef __has_include -# define __has_include(...) 0 +# ifndef __has_warning +# define __has_warning(...)
0 # endif # if !defined(_LIBCPP_COMPILER_CLANG_BASED) && __cplusplus < 201103L @@ -508,35 +225,14 @@ _LIBCPP_HARDENING_MODE_DEBUG # if !defined(_LIBCPP_ENABLE_EXPERIMENTAL) && !defined(_LIBCPP_BUILDING_LIBRARY) # define _LIBCPP_HAS_NO_INCOMPLETE_PSTL # define _LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN -# define _LIBCPP_HAS_NO_INCOMPLETE_TZDB +# define _LIBCPP_HAS_NO_EXPERIMENTAL_TZDB # define _LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM # endif -// Need to detect which libc we're using if we're on Linux. -# if defined(__linux__) -# include <features.h> -# if defined(__GLIBC_PREREQ) -# define _LIBCPP_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) -# else -# define _LIBCPP_GLIBC_PREREQ(a, b) 0 -# endif // defined(__GLIBC_PREREQ) -# endif // defined(__linux__) - # if defined(__MVS__) # include <features.h> // for __NATIVE_ASCII_F # endif -# ifndef __BYTE_ORDER__ -# error \ - "Your compiler doesn't seem to define __BYTE_ORDER__, which is required by libc++ to know the endianness of your target platform" -# endif - -# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -# define _LIBCPP_LITTLE_ENDIAN -# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -# define _LIBCPP_BIG_ENDIAN -# endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - # if defined(_WIN32) # define _LIBCPP_WIN32API # define _LIBCPP_SHORT_WCHAR 1 @@ -618,7 +314,7 @@ _LIBCPP_HARDENING_MODE_DEBUG # define _ALIGNAS(x) alignas(x) # define _LIBCPP_NORETURN [[noreturn]] # define _NOEXCEPT noexcept -# define _NOEXCEPT_(x) noexcept(x) +# define _NOEXCEPT_(...) noexcept(__VA_ARGS__) # define _LIBCPP_CONSTEXPR constexpr # else @@ -630,7 +326,7 @@ _LIBCPP_HARDENING_MODE_DEBUG # define _LIBCPP_HAS_NO_NOEXCEPT # define nullptr __nullptr # define _NOEXCEPT throw() -# define _NOEXCEPT_(x) +# define _NOEXCEPT_(...) # define static_assert(...) _Static_assert(__VA_ARGS__) # define decltype(...)
__decltype(__VA_ARGS__) # define _LIBCPP_CONSTEXPR @@ -640,63 +336,32 @@ typedef __char32_t char32_t; # endif -# if !defined(__cpp_exceptions) || __cpp_exceptions < 199711L -# define _LIBCPP_HAS_NO_EXCEPTIONS -# endif - # define _LIBCPP_PREFERRED_ALIGNOF(_Tp) __alignof(_Tp) -# if defined(_LIBCPP_COMPILER_CLANG_BASED) - -# if defined(__APPLE__) -# if defined(__i386__) || defined(__x86_64__) -// use old string layout on x86_64 and i386 -# elif defined(__arm__) -// use old string layout on arm (which does not include aarch64/arm64), except on watch ABIs -# if defined(__ARM_ARCH_7K__) && __ARM_ARCH_7K__ >= 2 -# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT -# endif -# else -# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT -# endif -# endif - // Objective-C++ features (opt-in) -# if __has_feature(objc_arc) -# define _LIBCPP_HAS_OBJC_ARC -# endif +# if __has_feature(objc_arc) +# define _LIBCPP_HAS_OBJC_ARC +# endif -# if __has_feature(objc_arc_weak) -# define _LIBCPP_HAS_OBJC_ARC_WEAK -# endif +# if __has_feature(objc_arc_weak) +# define _LIBCPP_HAS_OBJC_ARC_WEAK +# endif -# if __has_extension(blocks) -# define _LIBCPP_HAS_EXTENSION_BLOCKS -# endif +# if __has_extension(blocks) +# define _LIBCPP_HAS_EXTENSION_BLOCKS +# endif -# if defined(_LIBCPP_HAS_EXTENSION_BLOCKS) && defined(__APPLE__) -# define _LIBCPP_HAS_BLOCKS_RUNTIME -# endif +# if defined(_LIBCPP_HAS_EXTENSION_BLOCKS) && defined(__APPLE__) +# define _LIBCPP_HAS_BLOCKS_RUNTIME +# endif -# if !__has_feature(address_sanitizer) -# define _LIBCPP_HAS_NO_ASAN -# endif +# if !__has_feature(address_sanitizer) +# define _LIBCPP_HAS_NO_ASAN +# endif -# define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) +# define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) -# define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ - -# elif defined(_LIBCPP_COMPILER_GCC) - -# if !defined(__SANITIZE_ADDRESS__) -# define _LIBCPP_HAS_NO_ASAN -# endif - -# define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) - -# define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ - -# endif // _LIBCPP_COMPILER_[CLANG|GCC] +# define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ # if defined(_LIBCPP_OBJECT_FORMAT_COFF) @@ -787,6 +452,23 @@ typedef __char32_t char32_t; # define _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION _LIBCPP_ALWAYS_INLINE # endif +# ifdef _LIBCPP_COMPILER_CLANG_BASED +# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") +# define _LIBCPP_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(clang diagnostic ignored str)) +# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) +# elif defined(_LIBCPP_COMPILER_GCC) +# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") +# define _LIBCPP_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) +# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(GCC diagnostic ignored str)) +# else +# define _LIBCPP_DIAGNOSTIC_PUSH +# define _LIBCPP_DIAGNOSTIC_POP +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) +# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) +# endif + # if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST # define _LIBCPP_HARDENING_SIG f # elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE @@ -848,22 +530,24 @@ typedef __char32_t char32_t; // the implementation of a virtual function in an ABI-incompatible way in the first place, // since that would be an ABI break anyway. Hence, the lack of ABI tag should not be noticeable. 
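The _LIBCPP_DIAGNOSTIC_PUSH / _LIBCPP_CLANG_DIAGNOSTIC_IGNORED / _LIBCPP_GCC_DIAGNOSTIC_IGNORED family added in this hunk follows a common dual-compiler pattern: each wrapper is a no-op on the compiler it does not target, so call sites need no #if blocks. A reduced, self-contained version (local names, and simplified to take the full pragma text instead of just the flag):

#include <cstdio>

#if defined(__clang__)
#  define DIAG_PUSH _Pragma("clang diagnostic push")
#  define DIAG_POP _Pragma("clang diagnostic pop")
#  define CLANG_IGNORED(text) _Pragma(text)
#  define GCC_IGNORED(text)
#elif defined(__GNUC__)
#  define DIAG_PUSH _Pragma("GCC diagnostic push")
#  define DIAG_POP _Pragma("GCC diagnostic pop")
#  define CLANG_IGNORED(text)
#  define GCC_IGNORED(text) _Pragma(text)
#else
#  define DIAG_PUSH
#  define DIAG_POP
#  define CLANG_IGNORED(text)
#  define GCC_IGNORED(text)
#endif

[[deprecated("use something newer")]] void old_api() { std::puts("old"); }

int main() {
  DIAG_PUSH
  CLANG_IGNORED("clang diagnostic ignored \"-Wdeprecated-declarations\"")
  GCC_IGNORED("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
  old_api(); // no deprecation warning on either compiler
  DIAG_POP
}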
// +// The macro can be applied to record and enum types. When the tagged type is nested in +// a record this "parent" record needs to have the macro too. Another use case for applying +// this macro to records and unions is to apply an ABI tag to inline constexpr variables. +// This can be useful for inline variables that are implementation details which are expected +// to change in the future. +// // TODO: We provide an escape hatch with _LIBCPP_NO_ABI_TAG for folks who want to avoid increasing // the length of symbols with an ABI tag. In practice, we should remove the escape hatch and // use compression mangling instead, see https://github.com/itanium-cxx-abi/cxx-abi/issues/70. # ifndef _LIBCPP_NO_ABI_TAG # define _LIBCPP_HIDE_FROM_ABI \ _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION \ - __attribute__((__abi_tag__(_LIBCPP_TOSTRING(_LIBCPP_ODR_SIGNATURE)))) + __attribute__((__abi_tag__(_LIBCPP_TOSTRING(_LIBCPP_ODR_SIGNATURE)))) # else # define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION # endif # define _LIBCPP_HIDE_FROM_ABI_VIRTUAL _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION -// This macro provides a HIDE_FROM_ABI equivalent that can be applied to extern -// "C" function, as those lack mangling. -# define _LIBCPP_HIDE_FROM_ABI_C _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION - # ifdef _LIBCPP_BUILDING_LIBRARY # if _LIBCPP_ABI_VERSION > 1 # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 _LIBCPP_HIDE_FROM_ABI @@ -874,21 +558,51 @@ typedef __char32_t char32_t; # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 _LIBCPP_HIDE_FROM_ABI # endif -// TODO(LLVM-19): Remove _LIBCPP_INLINE_VISIBILITY and _VSTD, which we're keeping around -// only to ease the renaming for downstreams. -# define _LIBCPP_INLINE_VISIBILITY _LIBCPP_HIDE_FROM_ABI -# define _VSTD std +// TODO: Remove this workaround once we drop support for Clang 16 +# if __has_warning("-Wc++23-extensions") +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++23-extensions") +# else +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++2b-extensions") +# endif + +// Clang modules take a significant compile time hit when pushing and popping diagnostics. +// Since all the headers are marked as system headers in the modulemap, we can simply disable this +// pushing and popping when building with clang modules. +# if !__has_feature(modules) +# define _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS \ + _LIBCPP_DIAGNOSTIC_PUSH \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++11-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++23-extensions") +# define _LIBCPP_POP_EXTENSION_DIAGNOSTICS _LIBCPP_DIAGNOSTIC_POP +# else +# define _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS +# define _LIBCPP_POP_EXTENSION_DIAGNOSTICS +# endif // Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect.
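Spelled out, the namespace macros defined next expand to roughly the following, assuming the default ABI namespace __1 (vendors can rename it) and omitting the visibility attribute:

namespace std { inline namespace __1 {  // _LIBCPP_BEGIN_NAMESPACE_STD
// ...declarations land here; users still spell them std::vector, std::sort, ...
} }                                     // _LIBCPP_END_NAMESPACE_STD

// With _LIBCPP_ABI_NO_FILESYSTEM_INLINE_NAMESPACE the filesystem entities move:
//   default:  std::__1::__fs::filesystem::path
//   flag set: std::__1::filesystem::path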
// clang-format off -# define _LIBCPP_BEGIN_NAMESPACE_STD namespace _LIBCPP_TYPE_VISIBILITY_DEFAULT std { \ +# define _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS \ + namespace _LIBCPP_TYPE_VISIBILITY_DEFAULT std { \ inline namespace _LIBCPP_ABI_NAMESPACE { -# define _LIBCPP_END_NAMESPACE_STD }} +# define _LIBCPP_END_NAMESPACE_STD }} _LIBCPP_POP_EXTENSION_DIAGNOSTICS +#ifdef _LIBCPP_ABI_NO_FILESYSTEM_INLINE_NAMESPACE +# define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM _LIBCPP_BEGIN_NAMESPACE_STD namespace filesystem { +# define _LIBCPP_END_NAMESPACE_FILESYSTEM } _LIBCPP_END_NAMESPACE_STD +#else # define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM _LIBCPP_BEGIN_NAMESPACE_STD \ inline namespace __fs { namespace filesystem { -# define _LIBCPP_END_NAMESPACE_FILESYSTEM _LIBCPP_END_NAMESPACE_STD }} +# define _LIBCPP_END_NAMESPACE_FILESYSTEM }} _LIBCPP_END_NAMESPACE_STD +#endif + // clang-format on # if __has_attribute(__enable_if__) @@ -985,6 +699,14 @@ typedef __char32_t char32_t; # define _LIBCPP_DEPRECATED_(m) # endif +# if _LIBCPP_STD_VER < 20 +# define _LIBCPP_DEPRECATED_ATOMIC_SYNC \ + _LIBCPP_DEPRECATED_("The C++20 synchronization library has been deprecated prior to C++20. Please update to " \ + "using -std=c++20 if you need to use these facilities.") +# else +# define _LIBCPP_DEPRECATED_ATOMIC_SYNC /* nothing */ +# endif + # if !defined(_LIBCPP_CXX03_LANG) # define _LIBCPP_DEPRECATED_IN_CXX11 _LIBCPP_DEPRECATED # else @@ -1015,6 +737,12 @@ typedef __char32_t char32_t; # define _LIBCPP_DEPRECATED_IN_CXX23 # endif +# if _LIBCPP_STD_VER >= 26 +# define _LIBCPP_DEPRECATED_IN_CXX26 _LIBCPP_DEPRECATED +# else +# define _LIBCPP_DEPRECATED_IN_CXX26 +# endif + # if !defined(_LIBCPP_HAS_NO_CHAR8_T) # define _LIBCPP_DEPRECATED_WITH_CHAR8_T _LIBCPP_DEPRECATED # else @@ -1068,20 +796,6 @@ typedef __char32_t char32_t; # define _LIBCPP_CONSTEXPR_SINCE_CXX23 # endif -# ifndef _LIBCPP_HAS_NO_ASAN -extern "C" _LIBCPP_EXPORTED_FROM_ABI void -__sanitizer_annotate_contiguous_container(const void*, const void*, const void*, const void*); -extern "C" _LIBCPP_EXPORTED_FROM_ABI void __sanitizer_annotate_double_ended_contiguous_container( - const void*, const void*, const void*, const void*, const void*, const void*); -extern "C" _LIBCPP_EXPORTED_FROM_ABI int -__sanitizer_verify_double_ended_contiguous_container(const void*, const void*, const void*, const void*); -# endif - -// Try to find out if RTTI is disabled. -# if !defined(__cpp_rtti) || __cpp_rtti < 199711L -# define _LIBCPP_HAS_NO_RTTI -# endif - # ifndef _LIBCPP_WEAK # define _LIBCPP_WEAK __attribute__((__weak__)) # endif @@ -1193,9 +907,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # ifndef _LIBCPP_ATOMIC_FLAG_TYPE # define _LIBCPP_ATOMIC_FLAG_TYPE bool # endif -# ifdef _LIBCPP_FREESTANDING -# define _LIBCPP_ATOMIC_ONLY_USE_BUILTINS -# endif # endif # if defined(__FreeBSD__) && defined(__clang__) && __has_attribute(__no_thread_safety_analysis__) @@ -1257,23 +968,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # define _LIBCPP_IF_WIDE_CHARACTERS(...) 
__VA_ARGS__ # endif -# if defined(_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES) -# define _LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR -# define _LIBCPP_ENABLE_CXX17_REMOVED_BINDERS -# define _LIBCPP_ENABLE_CXX17_REMOVED_RANDOM_SHUFFLE -# define _LIBCPP_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS -# define _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION -# endif // _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES - -# if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES) -# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS -# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION -# define _LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS -# define _LIBCPP_ENABLE_CXX20_REMOVED_NEGATORS -# define _LIBCPP_ENABLE_CXX20_REMOVED_RAW_STORAGE_ITERATOR -# define _LIBCPP_ENABLE_CXX20_REMOVED_TYPE_TRAITS -# endif // _LIBCPP_ENABLE_CXX20_REMOVED_FEATURES - // clang-format off # define _LIBCPP_PUSH_MACROS _Pragma("push_macro(\"min\")") _Pragma("push_macro(\"max\")") _Pragma("push_macro(\"refresh\")") _Pragma("push_macro(\"move\")") _Pragma("push_macro(\"erase\")") # define _LIBCPP_POP_MACROS _Pragma("pop_macro(\"min\")") _Pragma("pop_macro(\"max\")") _Pragma("pop_macro(\"refresh\")") _Pragma("pop_macro(\"move\")") _Pragma("pop_macro(\"erase\")") @@ -1322,23 +1016,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c // the ABI inconsistent. # endif -# ifdef _LIBCPP_COMPILER_CLANG_BASED -# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") -# define _LIBCPP_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") -# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(clang diagnostic ignored str)) -# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) -# elif defined(_LIBCPP_COMPILER_GCC) -# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") -# define _LIBCPP_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") -# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) -# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(GCC diagnostic ignored str)) -# else -# define _LIBCPP_DIAGNOSTIC_PUSH -# define _LIBCPP_DIAGNOSTIC_POP -# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) -# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) -# endif - // c8rtomb() and mbrtoc8() were added in C++20 and C23. Support for these // functions is gradually being added to existing C libraries. The conditions // below check for known C library versions and conditions under which these @@ -1447,7 +1124,7 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # define _LIBCPP_USING_IF_EXISTS # endif -# if __has_cpp_attribute(nodiscard) +# if __has_cpp_attribute(__nodiscard__) # define _LIBCPP_NODISCARD [[__nodiscard__]] # else // We can't use GCC's [[gnu::warn_unused_result]] and @@ -1456,27 +1133,13 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # define _LIBCPP_NODISCARD # endif -// _LIBCPP_NODISCARD_EXT may be used to apply [[nodiscard]] to entities not -// specified as such as an extension. 
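Backing up to the _LIBCPP_PUSH_MACROS/_LIBCPP_POP_MACROS pair above: it exists because some environments define function-like macros named min, max, and friends (classically <windows.h> without NOMINMAX), which would otherwise mangle the library's own declarations. The underlying pragma dance, in a user-sized example:

#define min(a, b) ((a) < (b) ? (a) : (b)) // stand-in for <windows.h>'s macro

#pragma push_macro("min")
#undef min
#include <algorithm>
// With the macro stashed away, std::min parses as a normal function here.
inline int smallest(int a, int b) { return std::min(a, b); }
#pragma pop_macro("min")

int main() {
  return smallest(min(3, 4), 5) == 3 ? 0 : 1; // the macro works again after the pop
}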
-# if !defined(_LIBCPP_DISABLE_NODISCARD_EXT) -# define _LIBCPP_NODISCARD_EXT _LIBCPP_NODISCARD -# else -# define _LIBCPP_NODISCARD_EXT -# endif - -# if _LIBCPP_STD_VER >= 20 || !defined(_LIBCPP_DISABLE_NODISCARD_EXT) -# define _LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_NODISCARD -# else -# define _LIBCPP_NODISCARD_AFTER_CXX17 -# endif - # if __has_attribute(__no_destroy__) # define _LIBCPP_NO_DESTROY __attribute__((__no_destroy__)) # else # define _LIBCPP_NO_DESTROY # endif -# if __has_attribute(__diagnose_if__) && !defined(_LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS) +# if __has_attribute(__diagnose_if__) # define _LIBCPP_DIAGNOSE_WARNING(...) __attribute__((__diagnose_if__(__VA_ARGS__, "warning"))) # else # define _LIBCPP_DIAGNOSE_WARNING(...) diff --git a/lib/libcxx/include/__configuration/abi.h b/lib/libcxx/include/__configuration/abi.h new file mode 100644 index 0000000000..cfd8781213 --- /dev/null +++ b/lib/libcxx/include/__configuration/abi.h @@ -0,0 +1,172 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_ABI_H +#define _LIBCPP___CONFIGURATION_ABI_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ +#include <__configuration/compiler.h> +#include <__configuration/platform.h> + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if _LIBCPP_ABI_VERSION >= 2 +// Change short string representation so that string data starts at offset 0, +// improving its alignment in some cases. +# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT +// Fix deque iterator type in order to support incomplete types. +# define _LIBCPP_ABI_INCOMPLETE_TYPES_IN_DEQUE +// Fix undefined behavior in how std::list stores its linked nodes. +# define _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB +// Fix undefined behavior in how __tree stores its end and parent nodes. +# define _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB +// Fix undefined behavior in how __hash_table stores its pointer types. +# define _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB +# define _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB +# define _LIBCPP_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE +// Override the default return value of exception::what() for bad_function_call::what() +// with a string that is specific to bad_function_call (see http://wg21.link/LWG2233). +// This is an ABI break on platforms that sign and authenticate vtable function pointers +// because it changes the mangling of the virtual function located in the vtable, which +// changes how it gets signed. +# define _LIBCPP_ABI_BAD_FUNCTION_CALL_GOOD_WHAT_MESSAGE +// Enable optimized version of __do_get_(un)signed which avoids redundant copies. +# define _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET +// Give reverse_iterator one data member of type T, not two. +// Also, in C++17 and later, don't derive iterator types from std::iterator. +# define _LIBCPP_ABI_NO_ITERATOR_BASES +// Use the smallest possible integer type to represent the index of the variant. +// Previously libc++ used "unsigned int" exclusively. 
+# define _LIBCPP_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION +// Unstable attempt to provide a more optimized std::function +# define _LIBCPP_ABI_OPTIMIZED_FUNCTION +// All the regex constants must be distinct and nonzero. +# define _LIBCPP_ABI_REGEX_CONSTANTS_NONZERO +// Re-worked external template instantiations for std::string with a focus on +// performance and fast-path inlining. +# define _LIBCPP_ABI_STRING_OPTIMIZED_EXTERNAL_INSTANTIATION +// Enable clang::trivial_abi on std::unique_ptr. +# define _LIBCPP_ABI_ENABLE_UNIQUE_PTR_TRIVIAL_ABI +// Enable clang::trivial_abi on std::shared_ptr and std::weak_ptr +# define _LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI +// std::random_device holds some state when it uses an implementation that gets +// entropy from a file (see _LIBCPP_USING_DEV_RANDOM). When switching from this +// implementation to another one on a platform that has already shipped +// std::random_device, one needs to retain the same object layout to remain ABI +// compatible. This switch removes these workarounds for platforms that don't care +// about ABI compatibility. +# define _LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT +// Don't export the legacy __basic_string_common class and its methods from the built library. +# define _LIBCPP_ABI_DO_NOT_EXPORT_BASIC_STRING_COMMON +// Don't export the legacy __vector_base_common class and its methods from the built library. +# define _LIBCPP_ABI_DO_NOT_EXPORT_VECTOR_BASE_COMMON +// According to the Standard, `bitset::operator[] const` returns bool +# define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL +// Fix the implementation of CityHash used for std::hash. +// This is an ABI break because `std::hash` will return a different result, +// which means that hashing the same object in translation units built against +// different versions of libc++ can return inconsistent results. This is especially +// tricky since std::hash is used in the implementation of unordered containers. +// +// The incorrect implementation of CityHash has the problem that it drops some +// bits on the floor. +# define _LIBCPP_ABI_FIX_CITYHASH_IMPLEMENTATION +// Remove the base 10 implementation of std::to_chars from the dylib. +// The implementation moved to the header, but we still export the symbols from +// the dylib for backwards compatibility. +# define _LIBCPP_ABI_DO_NOT_EXPORT_TO_CHARS_BASE_10 +// Define std::array/std::string_view iterators to be __wrap_iters instead of raw +// pointers, which prevents people from relying on a non-portable implementation +// detail. This is especially useful because enabling bounded iterators hardening +// requires code not to make these assumptions. +# define _LIBCPP_ABI_USE_WRAP_ITER_IN_STD_ARRAY +# define _LIBCPP_ABI_USE_WRAP_ITER_IN_STD_STRING_VIEW +// Don't add an inline namespace for `std::filesystem` +# define _LIBCPP_ABI_NO_FILESYSTEM_INLINE_NAMESPACE +// std::basic_ios uses WEOF to indicate that the fill value is +// uninitialized. However, on platforms where the size of char_type is +// equal to or greater than the size of int_type and char_type is unsigned, +// std::char_traits<char_type>::eq_int_type() cannot distinguish between WEOF +// and WCHAR_MAX. This ABI setting determines whether we should instead track whether the fill +// value has been initialized using a separate boolean, which changes the ABI. +# define _LIBCPP_ABI_IOS_ALLOW_ARBITRARY_FILL_VALUE +// Make a std::pair of trivially copyable types trivially copyable.
+// While this technically doesn't change the layout of pair itself, other types may decide to programmatically change +// their representation based on whether something is trivially copyable. +# define _LIBCPP_ABI_TRIVIALLY_COPYABLE_PAIR +#elif _LIBCPP_ABI_VERSION == 1 +# if !(defined(_LIBCPP_OBJECT_FORMAT_COFF) || defined(_LIBCPP_OBJECT_FORMAT_XCOFF)) +// Enable compiling copies of now inline methods into the dylib to support +// applications compiled against older libraries. This is unnecessary with +// COFF dllexport semantics, since dllexport forces a non-inline definition +// of inline functions to be emitted anyway. Our own non-inline copy would +// conflict with the dllexport-emitted copy, so we disable it. For XCOFF, +// the linker will take issue with the symbols in the shared object if the +// weak inline methods get visibility (such as from -fvisibility-inlines-hidden), +// so disable it. +# define _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS +# endif +// Feature macros for disabling pre ABI v1 features. All of these options +// are deprecated. +# if defined(__FreeBSD__) && __FreeBSD__ < 14 +# define _LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR +# endif +#endif + +// We had some bugs where we use [[no_unique_address]] together with construct_at, +// which causes UB as the call on construct_at could write to overlapping subobjects. +// +// https://github.com/llvm/llvm-project/issues/70506 +// https://github.com/llvm/llvm-project/issues/70494 +// +// To fix the bug we had to change the ABI of some classes to remove [[no_unique_address]] under certain conditions. +// The macro below is used for all classes whose ABI has changed as part of fixing these bugs. +#define _LIBCPP_ABI_LLVM18_NO_UNIQUE_ADDRESS __attribute__((__abi_tag__("llvm18_nua"))) + +// Changes the iterator type of select containers (see below) to a bounded iterator that keeps track of whether it's +// within the bounds of the original container and asserts it on every dereference. +// +// ABI impact: changes the iterator type of the relevant containers. +// +// Supported containers: +// - `span`; +// - `string_view`. +// #define _LIBCPP_ABI_BOUNDED_ITERATORS + +// Changes the iterator type of `basic_string` to a bounded iterator that keeps track of whether it's within the bounds +// of the original container and asserts it on every dereference and when performing iterator arithmetic. +// +// ABI impact: changes the iterator type of `basic_string` and its specializations, such as `string` and `wstring`. +// #define _LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING + +// Changes the iterator type of `vector` to a bounded iterator that keeps track of whether it's within the bounds of the +// original container and asserts it on every dereference and when performing iterator arithmetic. Note: this doesn't +// yet affect `vector<bool>`. +// +// ABI impact: changes the iterator type of `vector` (except `vector<bool>`).
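The hazard behind the llvm18_nua tag above is easiest to see in miniature: a [[no_unique_address]] member is a potentially-overlapping subobject, so a sibling member may live in its tail padding, and recreating the member with construct_at may legally rewrite that padding. A sketch (needs -std=c++20; whether the clobber actually happens is layout-dependent):

#include <cstdio>
#include <memory>

struct A {
  int i = 0;
  char c = 0; // tail padding usually follows inside A
};

struct B {
  [[no_unique_address]] A a; // potentially overlapping: b.x may sit in a's padding
  char x = 42;
};

int main() {
  B b;
  // Recreating `a` may write its whole object representation, padding
  // included -- clobbering b.x if the two overlap (llvm/llvm-project
  // issues 70506 and 70494). This is the UB the ABI change avoids.
  std::construct_at(&b.a);
  std::printf("%d\n", b.x); // no longer guaranteed to print 42
}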
+// #define _LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR + +#if defined(_LIBCPP_COMPILER_CLANG_BASED) +# if defined(__APPLE__) +# if defined(__i386__) || defined(__x86_64__) +// use old string layout on x86_64 and i386 +# elif defined(__arm__) +// use old string layout on arm (which does not include aarch64/arm64), except on watch ABIs +# if defined(__ARM_ARCH_7K__) && __ARM_ARCH_7K__ >= 2 +# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT +# endif +# else +# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT +# endif +# endif +#endif + +#endif // _LIBCPP___CONFIGURATION_ABI_H diff --git a/lib/libcxx/include/__configuration/availability.h b/lib/libcxx/include/__configuration/availability.h new file mode 100644 index 0000000000..ab483a07c9 --- /dev/null +++ b/lib/libcxx/include/__configuration/availability.h @@ -0,0 +1,400 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_AVAILABILITY_H +#define _LIBCPP___CONFIGURATION_AVAILABILITY_H + +#include <__configuration/compiler.h> +#include <__configuration/language.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +// Libc++ is shipped by various vendors. In particular, it is used as a system +// library on macOS, iOS and other Apple platforms. In order for users to be +// able to compile a binary that is intended to be deployed to an older version +// of a platform, Clang provides availability attributes [1]. These attributes +// can be placed on declarations and are used to describe the life cycle of a +// symbol in the library. +// +// The main goal is to ensure a compile-time error if a symbol that hasn't been +// introduced in a previously released library is used in a program that targets +// that previously released library. Normally, this would be a load-time error +// when one tries to launch the program against the older library. +// +// For example, the filesystem library was introduced in the dylib in LLVM 9. +// On Apple platforms, this corresponds to macOS 10.15. If a user compiles on +// a macOS 10.15 host but targets macOS 10.13 with their program, the compiler +// would normally not complain (because the required declarations are in the +// headers), but the dynamic loader would fail to find the symbols when actually +// trying to launch the program on macOS 10.13. To turn this into a compile-time +// issue instead, declarations are annotated with when they were introduced, and +// the compiler can produce a diagnostic if the program references something that +// isn't available on the deployment target. +// +// This mechanism is general in nature, and any vendor can add their markup to +// the library (see below). Whenever a new feature is added that requires support +// in the shared library, two macros are added below to allow marking the feature +// as unavailable: +// 1. A macro named `_LIBCPP_AVAILABILITY_HAS_<feature>` which must be defined +// to `_LIBCPP_INTRODUCED_IN_<version>` for the appropriate LLVM version. +// 2. A macro named `_LIBCPP_AVAILABILITY_<feature>`, which must be defined to +// `_LIBCPP_INTRODUCED_IN_<version>_MARKUP` for the appropriate LLVM version.
+// +// When vendors decide to ship the feature as part of their shared library, they +// can update the `_LIBCPP_INTRODUCED_IN_<version>` macro (and the markup counterpart) +// based on the platform version they shipped that version of LLVM in. The library +// will then use this markup to provide an optimal user experience on these platforms. +// +// Furthermore, many features in the standard library have corresponding +// feature-test macros. The `_LIBCPP_AVAILABILITY_HAS_<feature>` macros +// are checked by the corresponding feature-test macros generated by +// generate_feature_test_macro_components.py to ensure that the library +// doesn't announce a feature as being implemented if it is unavailable on +// the deployment target. +// +// Note that this mechanism is disabled by default in the "upstream" libc++. +// Availability annotations are only meaningful when shipping libc++ inside +// a platform (i.e. as a system library), and so vendors that want them should +// turn those annotations on at CMake configuration time. +// +// [1]: https://clang.llvm.org/docs/AttributeReference.html#availability + +// For backwards compatibility, allow users to define _LIBCPP_DISABLE_AVAILABILITY +// for a while. +#if defined(_LIBCPP_DISABLE_AVAILABILITY) +# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) +# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +# endif +#endif + +// Availability markup is disabled when building the library, or when a non-Clang +// compiler is used because only Clang supports the necessary attributes. +#if defined(_LIBCPP_BUILDING_LIBRARY) || defined(_LIBCXXABI_BUILDING_LIBRARY) || !defined(_LIBCPP_COMPILER_CLANG_BASED) +# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) +# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +# endif +#endif + +// When availability annotations are disabled, we take for granted that features introduced +// in all versions of the library are available.
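Concretely, here is how one HAS/markup pair is consumed downstream; the names and the 0/1 split below are stand-ins for the vendor-provided definitions that follow:

#include <cstdio>

// Pretend the feature's dylib support is missing on Apple targets only.
#if defined(__APPLE__) && defined(__clang__)
#  define INTRODUCED_IN_LLVM_19 0
#  define INTRODUCED_IN_LLVM_19_ATTRIBUTE __attribute__((unavailable))
#else
#  define INTRODUCED_IN_LLVM_19 1
#  define INTRODUCED_IN_LLVM_19_ATTRIBUTE
#endif

#define AVAILABILITY_HAS_TZDB INTRODUCED_IN_LLVM_19        // feature-test side
#define AVAILABILITY_TZDB INTRODUCED_IN_LLVM_19_ATTRIBUTE  // markup side

AVAILABILITY_TZDB void reload_tzdb() { std::puts("tzdb reloaded"); }

int main() {
#if AVAILABILITY_HAS_TZDB
  reload_tzdb(); // calling an `unavailable` function would not compile
#endif
}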
+#if defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) + +# define _LIBCPP_INTRODUCED_IN_LLVM_19 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_18 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_17 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_17_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_16 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_16_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_15 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_14 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_14_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_13 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_13_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_12 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_12_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_11 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_10 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_10_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_9 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE /* nothing */ +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_PUSH /* nothing */ +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_POP /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_8 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_8_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_4 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE /* nothing */ + +#elif defined(__APPLE__) + +// clang-format off + +// LLVM 19 +// TODO: Fill this in +# define _LIBCPP_INTRODUCED_IN_LLVM_19 0 +# define _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE __attribute__((unavailable)) + +// LLVM 18 +// TODO: Fill this in +# define _LIBCPP_INTRODUCED_IN_LLVM_18 0 +# define _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE __attribute__((unavailable)) + +// LLVM 17 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 140400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 170400) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 170400) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 100400) +# define _LIBCPP_INTRODUCED_IN_LLVM_17 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_17 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_17_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 14.4))) \ + __attribute__((availability(ios, strict, introduced = 17.4))) \ + __attribute__((availability(tvos, strict, introduced = 17.4))) \ + __attribute__((availability(watchos, strict, introduced = 10.4))) + +// LLVM 16 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 170000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 170000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 100000) +# define _LIBCPP_INTRODUCED_IN_LLVM_16 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_16 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_16_ATTRIBUTE \ + 
__attribute__((availability(macos, strict, introduced = 14.0))) \ + __attribute__((availability(ios, strict, introduced = 17.0))) \ + __attribute__((availability(tvos, strict, introduced = 17.0))) \ + __attribute__((availability(watchos, strict, introduced = 10.0))) + +// LLVM 15 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 130400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 160500) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 160500) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 90500) +# define _LIBCPP_INTRODUCED_IN_LLVM_15 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_15 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 13.4))) \ + __attribute__((availability(ios, strict, introduced = 16.5))) \ + __attribute__((availability(tvos, strict, introduced = 16.5))) \ + __attribute__((availability(watchos, strict, introduced = 9.5))) + +// LLVM 14 +# define _LIBCPP_INTRODUCED_IN_LLVM_14 _LIBCPP_INTRODUCED_IN_LLVM_15 +# define _LIBCPP_INTRODUCED_IN_LLVM_14_ATTRIBUTE _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE + +// LLVM 13 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 160000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 160000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 90000) +# define _LIBCPP_INTRODUCED_IN_LLVM_13 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_13 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_13_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 13.0))) \ + __attribute__((availability(ios, strict, introduced = 16.0))) \ + __attribute__((availability(tvos, strict, introduced = 16.0))) \ + __attribute__((availability(watchos, strict, introduced = 9.0))) + +// LLVM 12 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 120300) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 150300) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 150300) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 80300) +# define _LIBCPP_INTRODUCED_IN_LLVM_12 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_12 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_12_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 12.3))) \ + __attribute__((availability(ios, strict, introduced = 15.3))) \ + __attribute__((availability(tvos, strict, introduced = 15.3))) \ + __attribute__((availability(watchos, strict, introduced = 8.3))) + +// LLVM 11 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && 
__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 70000) +# define _LIBCPP_INTRODUCED_IN_LLVM_11 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_11 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 11.0))) \ + __attribute__((availability(ios, strict, introduced = 14.0))) \ + __attribute__((availability(tvos, strict, introduced = 14.0))) \ + __attribute__((availability(watchos, strict, introduced = 7.0))) + +// LLVM 10 +# define _LIBCPP_INTRODUCED_IN_LLVM_10 _LIBCPP_INTRODUCED_IN_LLVM_11 +# define _LIBCPP_INTRODUCED_IN_LLVM_10_ATTRIBUTE _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE + +// LLVM 9 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000) +# define _LIBCPP_INTRODUCED_IN_LLVM_9 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_9 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 10.15))) \ + __attribute__((availability(ios, strict, introduced = 13.0))) \ + __attribute__((availability(tvos, strict, introduced = 13.0))) \ + __attribute__((availability(watchos, strict, introduced = 6.0))) +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_PUSH \ + _Pragma("clang attribute push(__attribute__((availability(macos,strict,introduced=10.15))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), apply_to=any(function,record))") +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_POP \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") + +// LLVM 4 +# if defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000 +# define _LIBCPP_INTRODUCED_IN_LLVM_4 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_4 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE __attribute__((availability(watchos, strict, introduced = 5.0))) + +// clang-format on + +#else + +// ...New vendors can add availability markup here... + +# error \ + "It looks like you're trying to enable vendor availability markup, but you haven't defined the corresponding macros yet!" + +#endif + +// These macros control the availability of std::bad_optional_access and +// other exception types. These were put in the shared library to prevent +// code bloat from every user program defining the vtable for these exception +// types. +// +// Note that when exceptions are disabled, the methods that normally throw +// these exceptions can be used even on older deployment targets, but those +// methods will abort instead of throwing. 
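That throw-or-abort split is visible with std::optional: in libc++ the throwing accessor carries the bad_optional_access availability annotation, while non-throwing alternatives need no markup at all. For instance:

#include <optional>

int main() {
  std::optional<int> o;
  // o.value() is the annotated path: with exceptions enabled it throws
  // std::bad_optional_access (so the deployment target must ship that type's
  // vtable); with -fno-exceptions the same call aborts instead, and the
  // annotation is defined away.
  return o.value_or(0); // never throws here, needs no availability markup
}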
+#define _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4 +#define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE + +#define _LIBCPP_AVAILABILITY_HAS_BAD_VARIANT_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4 +#define _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE + +#define _LIBCPP_AVAILABILITY_HAS_BAD_ANY_CAST _LIBCPP_INTRODUCED_IN_LLVM_4 +#define _LIBCPP_AVAILABILITY_BAD_ANY_CAST _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE + +// These macros control the availability of all parts of <filesystem> that +// depend on something in the dylib. +#define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY _LIBCPP_INTRODUCED_IN_LLVM_9 +#define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE +#define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_PUSH _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_PUSH +#define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_POP _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_POP + +// This controls the availability of the C++20 synchronization library, +// which requires shared library support for various operations +// (see libcxx/src/atomic.cpp). This includes <barrier>, <latch>, +// <semaphore>, and notification functions on std::atomic. +#define _LIBCPP_AVAILABILITY_HAS_SYNC _LIBCPP_INTRODUCED_IN_LLVM_11 +#define _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE + +// Enable additional explicit instantiations of iostreams components. This +// reduces the number of weak definitions generated in programs that use +// iostreams by providing a single strong definition in the shared library. +// +// TODO: Enable additional explicit instantiations on GCC once it supports exclude_from_explicit_instantiation, +// or once libc++ doesn't use the attribute anymore. +// TODO: Enable them on Windows once https://llvm.org/PR41018 has been fixed. +#if !defined(_LIBCPP_COMPILER_GCC) && !defined(_WIN32) +# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 _LIBCPP_INTRODUCED_IN_LLVM_12 +#else +# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 0 +#endif + +// This controls the availability of floating-point std::to_chars functions. +// These overloads were added later than the integer overloads. +#define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT _LIBCPP_INTRODUCED_IN_LLVM_14 +#define _LIBCPP_AVAILABILITY_TO_CHARS_FLOATING_POINT _LIBCPP_INTRODUCED_IN_LLVM_14_ATTRIBUTE + +// This controls whether the library claims to provide a default verbose +// termination function, and consequently whether the headers will try +// to use it when the mechanism isn't overridden at compile-time. +#define _LIBCPP_AVAILABILITY_HAS_VERBOSE_ABORT _LIBCPP_INTRODUCED_IN_LLVM_15 +#define _LIBCPP_AVAILABILITY_VERBOSE_ABORT _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE + +// This controls the availability of the C++17 std::pmr library, +// which is implemented in large part in the built library. +// +// TODO: Enable std::pmr markup once https://github.com/llvm/llvm-project/issues/40340 has been fixed +// Until then, it is possible for folks to try to use `std::pmr` when back-deploying to targets that don't support +// it and it'll be a load-time error, but we don't have a good alternative because the library won't compile if we +// use availability annotations until that bug has been fixed.
+#define _LIBCPP_AVAILABILITY_HAS_PMR _LIBCPP_INTRODUCED_IN_LLVM_16 +#define _LIBCPP_AVAILABILITY_PMR + +// These macros control the availability of __cxa_init_primary_exception +// in the built library, which std::make_exception_ptr might use +// (see libcxx/include/__exception/exception_ptr.h). +#define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION _LIBCPP_INTRODUCED_IN_LLVM_18 +#define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE + +// This controls the availability of C++23 <print>, which +// has a dependency on the built library (it needs access to +// the underlying buffer types of std::cout, std::cerr, and std::clog). +#define _LIBCPP_AVAILABILITY_HAS_PRINT _LIBCPP_INTRODUCED_IN_LLVM_18 +#define _LIBCPP_AVAILABILITY_PRINT _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE + +// This controls the availability of the C++20 time zone database. +// The parser code is built in the library. +#define _LIBCPP_AVAILABILITY_HAS_TZDB _LIBCPP_INTRODUCED_IN_LLVM_19 +#define _LIBCPP_AVAILABILITY_TZDB _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE + +// These macros determine whether we assume that std::bad_function_call and +// std::bad_expected_access provide a key function in the dylib. This allows +// centralizing their vtable and typeinfo instead of having all TUs provide +// a weak definition that then gets deduplicated. +#define _LIBCPP_AVAILABILITY_HAS_BAD_FUNCTION_CALL_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19 +#define _LIBCPP_AVAILABILITY_BAD_FUNCTION_CALL_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE +#define _LIBCPP_AVAILABILITY_HAS_BAD_EXPECTED_ACCESS_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19 +#define _LIBCPP_AVAILABILITY_BAD_EXPECTED_ACCESS_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE + +// Define availability attributes that depend on _LIBCPP_HAS_NO_EXCEPTIONS. +// Those are defined in terms of the availability attributes above, and +// should not be vendor-specific. +#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) +# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST +# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS +# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS +#else +# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCPP_AVAILABILITY_BAD_ANY_CAST +# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS +#endif + +// Define availability attributes that depend on both +// _LIBCPP_HAS_NO_EXCEPTIONS and _LIBCPP_HAS_NO_RTTI. +#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) || defined(_LIBCPP_HAS_NO_RTTI) +# undef _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION +# undef _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION +# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 0 +# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION +#endif + +#endif // _LIBCPP___CONFIGURATION_AVAILABILITY_H diff --git a/lib/libcxx/include/__configuration/compiler.h b/lib/libcxx/include/__configuration/compiler.h new file mode 100644 index 0000000000..d109aa748f --- /dev/null +++ b/lib/libcxx/include/__configuration/compiler.h @@ -0,0 +1,51 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_COMPILER_H +#define _LIBCPP___CONFIGURATION_COMPILER_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if defined(__apple_build_version__) +// Given AppleClang XX.Y.Z, _LIBCPP_APPLE_CLANG_VER is XXYZ (e.g. AppleClang 14.0.3 => 1403) +# define _LIBCPP_COMPILER_CLANG_BASED +# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000) +#elif defined(__clang__) +# define _LIBCPP_COMPILER_CLANG_BASED +# define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__) +#elif defined(__GNUC__) +# define _LIBCPP_COMPILER_GCC +# define _LIBCPP_GCC_VER (__GNUC__ * 100 + __GNUC_MINOR__) +#endif + +#ifdef __cplusplus + +// Warn if a compiler version is used that is not supported anymore +// LLVM RELEASE Update the minimum compiler versions +# if defined(_LIBCPP_CLANG_VER) +# if _LIBCPP_CLANG_VER < 1700 +# warning "Libc++ only supports Clang 17 and later" +# endif +# elif defined(_LIBCPP_APPLE_CLANG_VER) +# if _LIBCPP_APPLE_CLANG_VER < 1500 +# warning "Libc++ only supports AppleClang 15 and later" +# endif +# elif defined(_LIBCPP_GCC_VER) +# if _LIBCPP_GCC_VER < 1400 +# warning "Libc++ only supports GCC 14 and later" +# endif +# endif + +#endif + +#endif // _LIBCPP___CONFIGURATION_COMPILER_H diff --git a/lib/libcxx/include/__configuration/language.h b/lib/libcxx/include/__configuration/language.h new file mode 100644 index 0000000000..cca6c71486 --- /dev/null +++ b/lib/libcxx/include/__configuration/language.h @@ -0,0 +1,46 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_LANGUAGE_H +#define _LIBCPP___CONFIGURATION_LANGUAGE_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +// NOLINTBEGIN(libcpp-cpp-version-check) +#ifdef __cplusplus +# if __cplusplus <= 201103L +# define _LIBCPP_STD_VER 11 +# elif __cplusplus <= 201402L +# define _LIBCPP_STD_VER 14 +# elif __cplusplus <= 201703L +# define _LIBCPP_STD_VER 17 +# elif __cplusplus <= 202002L +# define _LIBCPP_STD_VER 20 +# elif __cplusplus <= 202302L +# define _LIBCPP_STD_VER 23 +# else +// Expected release year of the next C++ standard +# define _LIBCPP_STD_VER 26 +# endif +#endif // __cplusplus +// NOLINTEND(libcpp-cpp-version-check) + +#if !defined(__cpp_rtti) || __cpp_rtti < 199711L +# define _LIBCPP_HAS_NO_RTTI +#endif + +#if !defined(__cpp_exceptions) || __cpp_exceptions < 199711L +# define _LIBCPP_HAS_NO_EXCEPTIONS +#endif + +#endif // _LIBCPP___CONFIGURATION_LANGUAGE_H diff --git a/lib/libcxx/include/__configuration/platform.h b/lib/libcxx/include/__configuration/platform.h new file mode 100644 index 0000000000..540b30c558 --- /dev/null +++ b/lib/libcxx/include/__configuration/platform.h @@ -0,0 +1,54 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_PLATFORM_H +#define _LIBCPP___CONFIGURATION_PLATFORM_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if defined(__ELF__) +# define _LIBCPP_OBJECT_FORMAT_ELF 1 +#elif defined(__MACH__) +# define _LIBCPP_OBJECT_FORMAT_MACHO 1 +#elif defined(_WIN32) +# define _LIBCPP_OBJECT_FORMAT_COFF 1 +#elif defined(__wasm__) +# define _LIBCPP_OBJECT_FORMAT_WASM 1 +#elif defined(_AIX) +# define _LIBCPP_OBJECT_FORMAT_XCOFF 1 +#else +// ... add new file formats here ... +#endif + +// Need to detect which libc we're using if we're on Linux. 
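// [editor's sketch, not part of this patch] The _LIBCPP_STD_VER cascade in
// language.h above can be sanity-checked with a plain static_assert: a TU
// compiled with -std=c++20 reports __cplusplus == 202002L, which the cascade
// maps to 20.
#if defined(__cplusplus) && __cplusplus == 202002L
static_assert(_LIBCPP_STD_VER == 20, "sketch: 202002L maps to _LIBCPP_STD_VER 20");
#endif
// [end of editor's sketch]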
+#if defined(__linux__) +# include <features.h> +# if defined(__GLIBC_PREREQ) +# define _LIBCPP_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) +# else +# define _LIBCPP_GLIBC_PREREQ(a, b) 0 +# endif // defined(__GLIBC_PREREQ) +#endif // defined(__linux__) + +#ifndef __BYTE_ORDER__ +# error \ + "Your compiler doesn't seem to define __BYTE_ORDER__, which is required by libc++ to know the endianness of your target platform" +#endif + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define _LIBCPP_LITTLE_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define _LIBCPP_BIG_ENDIAN +#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + +#endif // _LIBCPP___CONFIGURATION_PLATFORM_H diff --git a/lib/libcxx/include/__debug_utils/sanitizers.h b/lib/libcxx/include/__debug_utils/sanitizers.h new file mode 100644 index 0000000000..d8547e3249 --- /dev/null +++ b/lib/libcxx/include/__debug_utils/sanitizers.h @@ -0,0 +1,104 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___LIBCXX_DEBUG_UTILS_SANITIZERS_H +#define _LIBCPP___LIBCXX_DEBUG_UTILS_SANITIZERS_H + +#include <__config> +#include <__type_traits/integral_constant.h> +#include <__type_traits/is_constant_evaluated.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +#ifndef _LIBCPP_HAS_NO_ASAN + +extern "C" { +_LIBCPP_EXPORTED_FROM_ABI void +__sanitizer_annotate_contiguous_container(const void*, const void*, const void*, const void*); +_LIBCPP_EXPORTED_FROM_ABI void __sanitizer_annotate_double_ended_contiguous_container( const void*, const void*, const void*, const void*, const void*, const void*); +_LIBCPP_EXPORTED_FROM_ABI int +__sanitizer_verify_double_ended_contiguous_container(const void*, const void*, const void*, const void*); +} + +#endif // _LIBCPP_HAS_NO_ASAN + +_LIBCPP_BEGIN_NAMESPACE_STD + +// ASan choices +#ifndef _LIBCPP_HAS_NO_ASAN +# define _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS 1 +#endif + +#ifdef _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS +// __asan_annotate_container_with_allocator determines whether containers with custom allocators are annotated. This is +// a public customization point to disable annotations if the custom allocator assumes that the memory isn't poisoned. +// See https://libcxx.llvm.org/UsingLibcxx.html#turning-off-asan-annotation-in-containers for more information. +template <class _Allocator> +struct __asan_annotate_container_with_allocator : true_type {}; +#endif + +// Annotate a double-ended contiguous range. +// - [__first_storage, __last_storage) is the allocated memory region, +// - [__first_old_contained, __last_old_contained) is the previously allowed (unpoisoned) range, and +// - [__first_new_contained, __last_new_contained) is the new allowed (unpoisoned) range.
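// [editor's sketch, not part of this patch] Using the customization point
// above, per the linked documentation: a project whose allocator hands out
// memory that ASan must not treat as poisoned can opt its containers out of
// annotation by specializing the trait. The allocator name is hypothetical.
#ifdef _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS
template <class _Tp>
struct __sketch_pool_allocator; // hypothetical allocator, declared only for illustration
template <class _Tp>
struct __asan_annotate_container_with_allocator<__sketch_pool_allocator<_Tp> > : false_type {};
#endif
// [end of editor's sketch]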
+template <class _Allocator> +_LIBCPP_HIDE_FROM_ABI void __annotate_double_ended_contiguous_container( + const void* __first_storage, + const void* __last_storage, + const void* __first_old_contained, + const void* __last_old_contained, + const void* __first_new_contained, + const void* __last_new_contained) { +#ifdef _LIBCPP_HAS_NO_ASAN + (void)__first_storage; + (void)__last_storage; + (void)__first_old_contained; + (void)__last_old_contained; + (void)__first_new_contained; + (void)__last_new_contained; +#else + if (__asan_annotate_container_with_allocator<_Allocator>::value && __first_storage != nullptr) + __sanitizer_annotate_double_ended_contiguous_container( + __first_storage, + __last_storage, + __first_old_contained, + __last_old_contained, + __first_new_contained, + __last_new_contained); +#endif +} + +// Annotate a contiguous range. +// [__first_storage, __last_storage) is the allocated memory region, +// __old_last_contained is the previously last allowed (unpoisoned) element, and +// __new_last_contained is the new last allowed (unpoisoned) element. +template <class _Allocator> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 void __annotate_contiguous_container( + const void* __first_storage, + const void* __last_storage, + const void* __old_last_contained, + const void* __new_last_contained) { +#ifdef _LIBCPP_HAS_NO_ASAN + (void)__first_storage; + (void)__last_storage; + (void)__old_last_contained; + (void)__new_last_contained; +#else + if (!__libcpp_is_constant_evaluated() && __asan_annotate_container_with_allocator<_Allocator>::value && + __first_storage != nullptr) + __sanitizer_annotate_contiguous_container( + __first_storage, __last_storage, __old_last_contained, __new_last_contained); +#endif +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___LIBCXX_DEBUG_UTILS_SANITIZERS_H diff --git a/lib/libcxx/include/__exception/exception_ptr.h b/lib/libcxx/include/__exception/exception_ptr.h index 53e2f718bc..beadd9212a 100644 --- a/lib/libcxx/include/__exception/exception_ptr.h +++ b/lib/libcxx/include/__exception/exception_ptr.h @@ -9,7 +9,6 @@ #ifndef _LIBCPP___EXCEPTION_EXCEPTION_PTR_H #define _LIBCPP___EXCEPTION_EXCEPTION_PTR_H -#include <__availability> #include <__config> #include <__exception/operations.h> #include <__memory/addressof.h> @@ -26,6 +25,8 @@ #ifndef _LIBCPP_ABI_MICROSOFT +# if _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION + namespace __cxxabiv1 { extern "C" { @@ -36,15 +37,20 @@ struct __cxa_exception; _LIBCPP_OVERRIDABLE_FUNC_VIS __cxa_exception* __cxa_init_primary_exception( void*, std::type_info*, - void( -# if defined(_WIN32) - __thiscall -# endif - *)(void*)) throw(); +# if defined(_WIN32) + void(__thiscall*)(void*)) throw(); +# elif defined(__wasm__) + // In Wasm, a destructor returns its argument + void* (*)(void*)) throw(); +# else + void (*)(void*)) throw(); +# endif } } // namespace __cxxabiv1 +# endif + #endif namespace std { // purposefully not using versioning namespace @@ -60,6 +66,9 @@ class _LIBCPP_EXPORTED_FROM_ABI exception_ptr { friend _LIBCPP_HIDE_FROM_ABI exception_ptr make_exception_ptr(_Ep) _NOEXCEPT; public: + // exception_ptr is basically a COW string.
+ using __trivially_relocatable = exception_ptr; + _LIBCPP_HIDE_FROM_ABI exception_ptr() _NOEXCEPT : __ptr_() {} _LIBCPP_HIDE_FROM_ABI exception_ptr(nullptr_t) _NOEXCEPT : __ptr_() {} @@ -88,9 +97,18 @@ _LIBCPP_HIDE_FROM_ABI exception_ptr make_exception_ptr(_Ep __e) _NOEXCEPT { using _Ep2 = __decay_t<_Ep>; void* __ex = __cxxabiv1::__cxa_allocate_exception(sizeof(_Ep)); +# ifdef __wasm__ + // In Wasm, a destructor returns its argument + (void)__cxxabiv1::__cxa_init_primary_exception( + __ex, const_cast<std::type_info*>(&typeid(_Ep)), [](void* __p) -> void* { +# else (void)__cxxabiv1::__cxa_init_primary_exception(__ex, const_cast<std::type_info*>(&typeid(_Ep)), [](void* __p) { - std::__destroy_at(static_cast<_Ep2*>(__p)); - }); +# endif + std::__destroy_at(static_cast<_Ep2*>(__p)); +# ifdef __wasm__ + return __p; +# endif + }); try { ::new (__ex) _Ep2(__e); diff --git a/lib/libcxx/include/__exception/nested_exception.h b/lib/libcxx/include/__exception/nested_exception.h index 417db54e6e..feb489f87f 100644 --- a/lib/libcxx/include/__exception/nested_exception.h +++ b/lib/libcxx/include/__exception/nested_exception.h @@ -15,8 +15,8 @@ #include <__type_traits/decay.h> #include <__type_traits/is_base_of.h> #include <__type_traits/is_class.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_copy_constructible.h> #include <__type_traits/is_final.h> #include <__type_traits/is_polymorphic.h> #include <__utility/forward.h> @@ -84,17 +84,15 @@ struct __can_dynamic_cast : _BoolConstant< is_polymorphic<_From>::value && (!is_base_of<_To, _From>::value || is_convertible<const _From*, const _To*>::value)> {}; -template <class _Ep> -inline _LIBCPP_HIDE_FROM_ABI void -rethrow_if_nested(const _Ep& __e, __enable_if_t< __can_dynamic_cast<_Ep, nested_exception>::value>* = 0) { +template <class _Ep, __enable_if_t<__can_dynamic_cast<_Ep, nested_exception>::value, int> = 0> +inline _LIBCPP_HIDE_FROM_ABI void rethrow_if_nested(const _Ep& __e) { const nested_exception* __nep = dynamic_cast<const nested_exception*>(std::addressof(__e)); if (__nep) __nep->rethrow_nested(); } -template <class _Ep> -inline _LIBCPP_HIDE_FROM_ABI void -rethrow_if_nested(const _Ep&, __enable_if_t<!__can_dynamic_cast<_Ep, nested_exception>::value>* = 0) {} +template <class _Ep, __enable_if_t<!__can_dynamic_cast<_Ep, nested_exception>::value, int> = 0> +inline _LIBCPP_HIDE_FROM_ABI void rethrow_if_nested(const _Ep&) {} } // namespace std diff --git a/lib/libcxx/include/__exception/operations.h b/lib/libcxx/include/__exception/operations.h index 8f374c0cce..0a9c7a7c7f 100644 --- a/lib/libcxx/include/__exception/operations.h +++ b/lib/libcxx/include/__exception/operations.h @@ -9,7 +9,6 @@ #ifndef _LIBCPP___EXCEPTION_OPERATIONS_H #define _LIBCPP___EXCEPTION_OPERATIONS_H -#include <__availability> #include <__config> #include <cstddef> diff --git a/lib/libcxx/include/__expected/bad_expected_access.h b/lib/libcxx/include/__expected/bad_expected_access.h index 27f01d9350..1b734389e8 100644 --- a/lib/libcxx/include/__expected/bad_expected_access.h +++ b/lib/libcxx/include/__expected/bad_expected_access.h @@ -27,23 +27,28 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <class _Err> class bad_expected_access; +_LIBCPP_DIAGNOSTIC_PUSH +# if !_LIBCPP_AVAILABILITY_HAS_BAD_EXPECTED_ACCESS_KEY_FUNCTION +_LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wweak-vtables") +# endif template <> -class bad_expected_access<void> : public exception { +class _LIBCPP_EXPORTED_FROM_ABI bad_expected_access<void> : public exception { protected: - _LIBCPP_HIDE_FROM_ABI bad_expected_access() noexcept = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access(const bad_expected_access&) = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access(bad_expected_access&&) = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(const
bad_expected_access&) = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(bad_expected_access&&) = default; - _LIBCPP_HIDE_FROM_ABI_VIRTUAL ~bad_expected_access() override = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access() noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access(const bad_expected_access&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access(bad_expected_access&&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(const bad_expected_access&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(bad_expected_access&&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI_VIRTUAL ~bad_expected_access() override = default; public: - // The way this has been designed (by using a class template below) means that we'll already - // have a profusion of these vtables in TUs, and the dynamic linker will already have a bunch - // of work to do. So it is not worth hiding the specialization in the dylib, given that - // it adds deployment target restrictions. +# if _LIBCPP_AVAILABILITY_HAS_BAD_EXPECTED_ACCESS_KEY_FUNCTION + const char* what() const noexcept override; +# else _LIBCPP_HIDE_FROM_ABI_VIRTUAL const char* what() const noexcept override { return "bad access to std::expected"; } +# endif }; +_LIBCPP_DIAGNOSTIC_POP template <class _Err> class bad_expected_access : public bad_expected_access<void> { diff --git a/lib/libcxx/include/__expected/expected.h b/lib/libcxx/include/__expected/expected.h index 443d9257dc..f618b20603 100644 --- a/lib/libcxx/include/__expected/expected.h +++ b/lib/libcxx/include/__expected/expected.h @@ -23,24 +23,15 @@ #include <__type_traits/is_assignable.h> #include <__type_traits/is_constructible.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> -#include <__type_traits/is_default_constructible.h> #include <__type_traits/is_function.h> -#include <__type_traits/is_move_assignable.h> -#include <__type_traits/is_move_constructible.h> +#include <__type_traits/is_nothrow_assignable.h> #include <__type_traits/is_nothrow_constructible.h> -#include <__type_traits/is_nothrow_copy_assignable.h> -#include <__type_traits/is_nothrow_copy_constructible.h> -#include <__type_traits/is_nothrow_default_constructible.h> -#include <__type_traits/is_nothrow_move_assignable.h> -#include <__type_traits/is_nothrow_move_constructible.h> #include <__type_traits/is_reference.h> #include <__type_traits/is_same.h> #include <__type_traits/is_swappable.h> -#include <__type_traits/is_trivially_copy_constructible.h> +#include <__type_traits/is_trivially_constructible.h> #include <__type_traits/is_trivially_destructible.h> -#include <__type_traits/is_trivially_move_constructible.h> +#include <__type_traits/is_trivially_relocatable.h> #include <__type_traits/is_void.h> #include <__type_traits/lazy.h> #include <__type_traits/negation.h> @@ -473,6 +464,11 @@ public: using error_type = _Err; using unexpected_type = unexpected<_Err>; + using __trivially_relocatable = + __conditional_t<__libcpp_is_trivially_relocatable<_Tp>::value && __libcpp_is_trivially_relocatable<_Err>::value, + expected, + void>; + template <class _Up> using rebind = expected<_Up, error_type>; @@ -511,7 +507,9 @@ private: _And< is_constructible<_Tp, _UfQual>, is_constructible<_Err, _OtherErrQual>, _If<_Not<is_same<remove_cv_t<_Tp>, bool>>::value, - _And< _Not<is_constructible<_Tp, expected<_Up, _OtherErr>&>>, + _And< + _Not<_And<is_same<_Tp, _Up>, is_same<_Err, _OtherErr>>>, // use the copy constructor instead, see #92676 + _Not<is_constructible<_Tp, expected<_Up, _OtherErr>&>>, _Not<is_constructible<_Tp, expected<_Up, _OtherErr>>>, _Not<is_constructible<_Tp, const expected<_Up, _OtherErr>&>>, _Not<is_constructible<_Tp, const expected<_Up, _OtherErr>>>, diff --git
a/lib/libcxx/include/__filesystem/copy_options.h b/lib/libcxx/include/__filesystem/copy_options.h index 1bf71292c8..097eebe611 100644 --- a/lib/libcxx/include/__filesystem/copy_options.h +++ b/lib/libcxx/include/__filesystem/copy_options.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_COPY_OPTIONS_H #define _LIBCPP___FILESYSTEM_COPY_OPTIONS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/directory_entry.h b/lib/libcxx/include/__filesystem/directory_entry.h index 016ad94a85..96d88dcd90 100644 --- a/lib/libcxx/include/__filesystem/directory_entry.h +++ b/lib/libcxx/include/__filesystem/directory_entry.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_DIRECTORY_ENTRY_H #define _LIBCPP___FILESYSTEM_DIRECTORY_ENTRY_H -#include <__availability> #include <__chrono/time_point.h> #include <__compare/ordering.h> #include <__config> diff --git a/lib/libcxx/include/__filesystem/directory_iterator.h b/lib/libcxx/include/__filesystem/directory_iterator.h index a5aa5ff543..e0246d8001 100644 --- a/lib/libcxx/include/__filesystem/directory_iterator.h +++ b/lib/libcxx/include/__filesystem/directory_iterator.h @@ -11,7 +11,6 @@ #define _LIBCPP___FILESYSTEM_DIRECTORY_ITERATOR_H #include <__assert> -#include <__availability> #include <__config> #include <__filesystem/directory_entry.h> #include <__filesystem/directory_options.h> diff --git a/lib/libcxx/include/__filesystem/directory_options.h b/lib/libcxx/include/__filesystem/directory_options.h index 683c4678e0..d0cd3ebfda 100644 --- a/lib/libcxx/include/__filesystem/directory_options.h +++ b/lib/libcxx/include/__filesystem/directory_options.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_DIRECTORY_OPTIONS_H #define _LIBCPP___FILESYSTEM_DIRECTORY_OPTIONS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/file_status.h b/lib/libcxx/include/__filesystem/file_status.h index 3e2b32eef8..da316c8b02 100644 --- a/lib/libcxx/include/__filesystem/file_status.h +++ b/lib/libcxx/include/__filesystem/file_status.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILE_STATUS_H #define _LIBCPP___FILESYSTEM_FILE_STATUS_H -#include <__availability> #include <__config> #include <__filesystem/file_type.h> #include <__filesystem/perms.h> diff --git a/lib/libcxx/include/__filesystem/file_time_type.h b/lib/libcxx/include/__filesystem/file_time_type.h index e086dbcc3f..63e4ae1578 100644 --- a/lib/libcxx/include/__filesystem/file_time_type.h +++ b/lib/libcxx/include/__filesystem/file_time_type.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILE_TIME_TYPE_H #define _LIBCPP___FILESYSTEM_FILE_TIME_TYPE_H -#include <__availability> #include <__chrono/file_clock.h> #include <__chrono/time_point.h> #include <__config> diff --git a/lib/libcxx/include/__filesystem/file_type.h b/lib/libcxx/include/__filesystem/file_type.h index c509085d90..e4ac1dfee9 100644 --- a/lib/libcxx/include/__filesystem/file_type.h +++ b/lib/libcxx/include/__filesystem/file_type.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILE_TYPE_H #define _LIBCPP___FILESYSTEM_FILE_TYPE_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/filesystem_error.h b/lib/libcxx/include/__filesystem/filesystem_error.h index bfdcc5eaee..80a11e3b19 100644 --- a/lib/libcxx/include/__filesystem/filesystem_error.h +++ 
b/lib/libcxx/include/__filesystem/filesystem_error.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILESYSTEM_ERROR_H #define _LIBCPP___FILESYSTEM_FILESYSTEM_ERROR_H -#include <__availability> #include <__config> #include <__filesystem/path.h> #include <__memory/shared_ptr.h> diff --git a/lib/libcxx/include/__filesystem/operations.h b/lib/libcxx/include/__filesystem/operations.h index 9bb83576f5..f588189ed1 100644 --- a/lib/libcxx/include/__filesystem/operations.h +++ b/lib/libcxx/include/__filesystem/operations.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_OPERATIONS_H #define _LIBCPP___FILESYSTEM_OPERATIONS_H -#include <__availability> #include <__chrono/time_point.h> #include <__config> #include <__filesystem/copy_options.h> diff --git a/lib/libcxx/include/__filesystem/path.h b/lib/libcxx/include/__filesystem/path.h index 8c7d426f7a..ff468d5177 100644 --- a/lib/libcxx/include/__filesystem/path.h +++ b/lib/libcxx/include/__filesystem/path.h @@ -12,11 +12,9 @@ #include <__algorithm/replace.h> #include <__algorithm/replace_copy.h> -#include <__availability> #include <__config> -#include <__functional/hash.h> #include <__functional/unary_function.h> -#include <__fwd/hash.h> +#include <__fwd/functional.h> #include <__iterator/back_insert_iterator.h> #include <__iterator/iterator_traits.h> #include <__type_traits/decay.h> @@ -813,7 +811,7 @@ public: _LIBCPP_HIDE_FROM_ABI path extension() const { return string_type(__extension()); } // query - _LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI bool empty() const noexcept { return __pn_.empty(); } + _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool empty() const noexcept { return __pn_.empty(); } _LIBCPP_HIDE_FROM_ABI bool has_root_name() const { return !__root_name().empty(); } _LIBCPP_HIDE_FROM_ABI bool has_root_directory() const { return !__root_directory().empty(); } diff --git a/lib/libcxx/include/__filesystem/path_iterator.h b/lib/libcxx/include/__filesystem/path_iterator.h index d2d65cd122..f4d486d86c 100644 --- a/lib/libcxx/include/__filesystem/path_iterator.h +++ b/lib/libcxx/include/__filesystem/path_iterator.h @@ -11,7 +11,6 @@ #define _LIBCPP___FILESYSTEM_PATH_ITERATOR_H #include <__assert> -#include <__availability> #include <__config> #include <__filesystem/path.h> #include <__iterator/iterator_traits.h> diff --git a/lib/libcxx/include/__filesystem/perm_options.h b/lib/libcxx/include/__filesystem/perm_options.h index 529ef13558..64c16ee60a 100644 --- a/lib/libcxx/include/__filesystem/perm_options.h +++ b/lib/libcxx/include/__filesystem/perm_options.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_PERM_OPTIONS_H #define _LIBCPP___FILESYSTEM_PERM_OPTIONS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/perms.h b/lib/libcxx/include/__filesystem/perms.h index 8f5f9a7e82..458f1e6e53 100644 --- a/lib/libcxx/include/__filesystem/perms.h +++ b/lib/libcxx/include/__filesystem/perms.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_PERMS_H #define _LIBCPP___FILESYSTEM_PERMS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/recursive_directory_iterator.h b/lib/libcxx/include/__filesystem/recursive_directory_iterator.h index a8af4f73b1..caa1396eb3 100644 --- a/lib/libcxx/include/__filesystem/recursive_directory_iterator.h +++ b/lib/libcxx/include/__filesystem/recursive_directory_iterator.h @@ -10,7 +10,6 @@ #ifndef 
_LIBCPP___FILESYSTEM_RECURSIVE_DIRECTORY_ITERATOR_H #define _LIBCPP___FILESYSTEM_RECURSIVE_DIRECTORY_ITERATOR_H -#include <__availability> #include <__config> #include <__filesystem/directory_entry.h> #include <__filesystem/directory_options.h> diff --git a/lib/libcxx/include/__filesystem/space_info.h b/lib/libcxx/include/__filesystem/space_info.h index 2e80ae3b2c..3fa57d3309 100644 --- a/lib/libcxx/include/__filesystem/space_info.h +++ b/lib/libcxx/include/__filesystem/space_info.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_SPACE_INFO_H #define _LIBCPP___FILESYSTEM_SPACE_INFO_H -#include <__availability> #include <__config> #include <cstdint> diff --git a/lib/libcxx/include/__filesystem/u8path.h b/lib/libcxx/include/__filesystem/u8path.h index bde8780548..dae5823128 100644 --- a/lib/libcxx/include/__filesystem/u8path.h +++ b/lib/libcxx/include/__filesystem/u8path.h @@ -11,7 +11,6 @@ #define _LIBCPP___FILESYSTEM_U8PATH_H #include <__algorithm/unwrap_iter.h> -#include <__availability> #include <__config> #include <__filesystem/path.h> #include <string> diff --git a/lib/libcxx/include/__format/concepts.h b/lib/libcxx/include/__format/concepts.h index 299c5f40ee..13380e9b91 100644 --- a/lib/libcxx/include/__format/concepts.h +++ b/lib/libcxx/include/__format/concepts.h @@ -13,12 +13,14 @@ #include <__concepts/same_as.h> #include <__concepts/semiregular.h> #include <__config> -#include <__format/format_fwd.h> #include <__format/format_parse_context.h> +#include <__fwd/format.h> +#include <__fwd/tuple.h> +#include <__tuple/tuple_size.h> #include <__type_traits/is_specialization.h> #include <__type_traits/remove_const.h> +#include <__type_traits/remove_reference.h> #include <__utility/pair.h> -#include <tuple> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/lib/libcxx/include/__format/container_adaptor.h b/lib/libcxx/include/__format/container_adaptor.h index ec806ef16b..9f49ca03bf 100644 --- a/lib/libcxx/include/__format/container_adaptor.h +++ b/lib/libcxx/include/__format/container_adaptor.h @@ -18,11 +18,11 @@ #include <__format/concepts.h> #include <__format/formatter.h> #include <__format/range_default_formatter.h> +#include <__fwd/queue.h> +#include <__fwd/stack.h> #include <__ranges/ref_view.h> #include <__type_traits/is_const.h> #include <__type_traits/maybe_const.h> -#include <queue> -#include <stack> _LIBCPP_BEGIN_NAMESPACE_STD diff --git a/lib/libcxx/include/__format/escaped_output_table.h b/lib/libcxx/include/__format/escaped_output_table.h index 495a2fbc7b..f7be2dc61f 100644 --- a/lib/libcxx/include/__format/escaped_output_table.h +++ b/lib/libcxx/include/__format/escaped_output_table.h @@ -80,10 +80,9 @@ namespace __escaped_output_table { /// The entries of the characters to escape in format's debug string. /// /// Contains the entries for [format.string.escaped]/2.2.1.2.1 -/// CE is a Unicode encoding and C corresponds to either a UCS scalar value -/// whose Unicode property General_Category has a value in the groups -/// Separator (Z) or Other (C) or to a UCS scalar value which has the Unicode -/// property Grapheme_Extend=Yes, as described by table 12 of UAX #44 +/// CE is a Unicode encoding and C corresponds to a UCS scalar value whose +/// Unicode property General_Category has a value in the groups Separator (Z) +/// or Other (C), as described by table 12 of UAX #44 /// /// Separator (Z) consists of General_Category /// - Space_Separator, @@ -98,7 +97,6 @@ namespace __escaped_output_table { /// - Unassigned.
/// /// The data is generated from -/// - https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt /// - https://www.unicode.org/Public/UCD/latest/ucd/extracted/DerivedGeneralCategory.txt /// /// The table is similar to the table @@ -107,926 +105,751 @@ namespace __escaped_output_table { /// table lacks a property, thus having more bits available for the size. /// /// The data has 2 values: -/// - bits [0, 10] The size of the range, allowing 2048 elements. -/// - bits [11, 31] The lower bound code point of the range. The upper bound of -/// the range is lower bound + size. -inline constexpr uint32_t __entries[893] = { - 0x00000020, - 0x0003f821, - 0x00056800, - 0x0018006f, - 0x001bc001, - 0x001c0003, - 0x001c5800, - 0x001c6800, - 0x001d1000, - 0x00241806, - 0x00298000, - 0x002ab801, - 0x002c5801, - 0x002c802d, - 0x002df800, - 0x002e0801, - 0x002e2001, - 0x002e3808, - 0x002f5803, - 0x002fa810, - 0x0030800a, - 0x0030e000, - 0x00325814, - 0x00338000, - 0x0036b007, - 0x0036f805, - 0x00373801, - 0x00375003, - 0x00387001, - 0x00388800, - 0x0039801c, - 0x003d300a, - 0x003d900d, - 0x003f5808, - 0x003fd802, - 0x0040b003, - 0x0040d808, - 0x00412802, - 0x00414806, - 0x0041f800, - 0x0042c804, - 0x0042f800, - 0x00435804, - 0x00447810, - 0x00465038, - 0x0049d000, - 0x0049e000, - 0x004a0807, - 0x004a6800, - 0x004a8806, - 0x004b1001, - 0x004c0800, - 0x004c2000, - 0x004c6801, - 0x004c8801, - 0x004d4800, - 0x004d8800, - 0x004d9802, - 0x004dd002, - 0x004df000, - 0x004e0805, - 0x004e4801, - 0x004e6800, - 0x004e780c, - 0x004ef000, - 0x004f1003, - 0x004ff004, - 0x00502000, - 0x00505803, - 0x00508801, - 0x00514800, - 0x00518800, - 0x0051a000, - 0x0051b800, - 0x0051d003, - 0x00520817, - 0x0052e800, - 0x0052f806, - 0x00538001, - 0x0053a800, - 0x0053b80b, - 0x00542000, - 0x00547000, - 0x00549000, - 0x00554800, - 0x00558800, - 0x0055a000, - 0x0055d002, - 0x00560807, - 0x00565000, - 0x00566802, - 0x0056880e, - 0x00571003, - 0x00579006, - 0x0057d007, - 0x00582000, - 0x00586801, - 0x00588801, - 0x00594800, - 0x00598800, - 0x0059a000, - 0x0059d002, - 0x0059f001, - 0x005a0805, - 0x005a4801, - 0x005a680e, - 0x005af000, - 0x005b1003, - 0x005bc00a, - 0x005c2000, - 0x005c5802, - 0x005c8800, - 0x005cb002, - 0x005cd800, - 0x005ce800, - 0x005d0002, - 0x005d2802, - 0x005d5802, - 0x005dd004, - 0x005e0000, - 0x005e1802, - 0x005e4800, - 0x005e6802, - 0x005e8814, - 0x005fd805, - 0x00602000, - 0x00606800, - 0x00608800, - 0x00614800, - 0x0061d002, - 0x0061f002, - 0x00622812, - 0x0062d801, - 0x0062f001, - 0x00631003, - 0x00638006, - 0x00640800, - 0x00646800, - 0x00648800, - 0x00654800, - 0x0065a000, - 0x0065d002, - 0x0065f800, - 0x00661000, - 0x00662801, - 0x00664800, - 0x00666010, - 0x0066f800, - 0x00671003, - 0x00678000, - 0x0067a00d, - 0x00686800, - 0x00688800, - 0x0069d801, - 0x0069f000, - 0x006a0804, - 0x006a4800, - 0x006a6800, - 0x006a8003, - 0x006ab800, - 0x006b1003, - 0x006c0001, - 0x006c2000, - 0x006cb802, - 0x006d9000, - 0x006de000, - 0x006df001, - 0x006e3808, - 0x006e9005, - 0x006ef806, - 0x006f8001, - 0x006fa80b, - 0x00718800, - 0x0071a00a, - 0x00723807, - 0x0072e024, - 0x00741800, - 0x00742800, - 0x00745800, - 0x00752000, - 0x00753000, - 0x00758800, - 0x0075a008, - 0x0075f001, - 0x00762800, - 0x00763808, - 0x0076d001, - 0x0077001f, - 0x0078c001, - 0x0079a800, - 0x0079b800, - 0x0079c800, - 0x007a4000, - 0x007b6811, - 0x007c0004, - 0x007c3001, - 0x007c6830, - 0x007e3000, - 0x007e6800, - 0x007ed824, - 0x00816803, - 0x00819005, - 0x0081c801, - 0x0081e801, - 0x0082c001, - 0x0082f002, - 0x00838803, - 0x00841000, - 
0x00842801, - 0x00846800, - 0x0084e800, - 0x00863000, - 0x00864004, - 0x00867001, - 0x00924800, - 0x00927001, - 0x0092b800, - 0x0092c800, - 0x0092f001, - 0x00944800, - 0x00947001, - 0x00958800, - 0x0095b001, - 0x0095f800, - 0x00960800, - 0x00963001, - 0x0096b800, - 0x00988800, - 0x0098b001, - 0x009ad804, - 0x009be802, - 0x009cd005, - 0x009fb001, - 0x009ff001, - 0x00b40000, - 0x00b4e802, - 0x00b7c806, - 0x00b89002, - 0x00b8b008, - 0x00b99001, - 0x00b9b808, - 0x00ba900d, - 0x00bb6800, - 0x00bb880e, - 0x00bda001, - 0x00bdb806, - 0x00be3000, - 0x00be480a, - 0x00bee802, - 0x00bf5005, - 0x00bfd005, - 0x00c05804, - 0x00c0d005, - 0x00c3c806, - 0x00c42801, - 0x00c54800, - 0x00c55804, - 0x00c7b009, - 0x00c8f803, - 0x00c93801, - 0x00c96003, - 0x00c99000, - 0x00c9c806, - 0x00ca0802, - 0x00cb7001, - 0x00cba80a, - 0x00cd6003, - 0x00ce5005, - 0x00ced802, - 0x00d0b801, - 0x00d0d802, - 0x00d2b000, - 0x00d2c008, - 0x00d31000, - 0x00d32807, - 0x00d3980c, - 0x00d45005, - 0x00d4d005, - 0x00d57055, - 0x00d9a006, - 0x00d9e000, - 0x00da1000, - 0x00da6802, - 0x00db5808, - 0x00dbf802, - 0x00dd1003, - 0x00dd4001, - 0x00dd5802, - 0x00df3000, - 0x00df4001, - 0x00df6800, - 0x00df7802, - 0x00dfa007, - 0x00e16007, - 0x00e1b004, - 0x00e25002, - 0x00e44806, - 0x00e5d801, - 0x00e6400a, - 0x00e6a00c, - 0x00e71006, - 0x00e76800, - 0x00e7a000, - 0x00e7c001, - 0x00e7d804, - 0x00ee003f, - 0x00f8b001, - 0x00f8f001, - 0x00fa3001, - 0x00fa7001, - 0x00fac000, - 0x00fad000, - 0x00fae000, - 0x00faf000, - 0x00fbf001, - 0x00fda800, - 0x00fe2800, - 0x00fea001, - 0x00fee000, - 0x00ff8001, - 0x00ffa800, - 0x00fff810, - 0x01014007, - 0x0102f810, - 0x01039001, - 0x01047800, - 0x0104e802, - 0x0106083e, - 0x010c6003, - 0x01213818, - 0x01225814, - 0x015ba001, - 0x015cb000, - 0x01677802, - 0x0167a004, - 0x01693000, - 0x01694004, - 0x01697001, - 0x016b4006, - 0x016b880e, - 0x016cb808, - 0x016d3800, - 0x016d7800, - 0x016db800, - 0x016df800, - 0x016e3800, - 0x016e7800, - 0x016eb800, - 0x016ef820, - 0x0172f021, - 0x0174d000, - 0x0177a00b, - 0x017eb019, - 0x017fe004, - 0x01815005, - 0x01820000, - 0x0184b803, - 0x01880004, - 0x01898000, - 0x018c7800, - 0x018f200b, - 0x0190f800, - 0x05246802, - 0x05263808, - 0x05316013, - 0x05337803, - 0x0533a009, - 0x0534f001, - 0x05378001, - 0x0537c007, - 0x053e5804, - 0x053e9000, - 0x053ea000, - 0x053ed017, - 0x05401000, - 0x05403000, - 0x05405800, - 0x05412801, - 0x05416003, - 0x0541d005, - 0x0543c007, - 0x05462009, - 0x0546d017, - 0x0547f800, - 0x05493007, - 0x054a380a, - 0x054aa00a, - 0x054be805, - 0x054d9800, - 0x054db003, - 0x054de001, - 0x054e7000, - 0x054ed003, - 0x054f2800, - 0x054ff800, - 0x05514805, - 0x05518801, - 0x0551a80a, - 0x05521800, - 0x05526000, - 0x05527001, - 0x0552d001, - 0x0553e000, - 0x05558000, - 0x05559002, - 0x0555b801, - 0x0555f001, - 0x05560800, - 0x05561817, - 0x05576001, - 0x0557b00a, - 0x05583801, - 0x05587801, - 0x0558b808, - 0x05593800, - 0x05597800, - 0x055b6003, - 0x055f2800, - 0x055f4000, - 0x055f6802, - 0x055fd005, - 0x06bd200b, - 0x06be3803, - 0x06bfe7ff, - 0x06ffe7ff, - 0x073fe7ff, - 0x077fe7ff, - 0x07bfe103, - 0x07d37001, - 0x07d6d025, - 0x07d8380b, - 0x07d8c004, - 0x07d8f000, - 0x07d9b800, - 0x07d9e800, - 0x07d9f800, - 0x07da1000, - 0x07da2800, - 0x07de180f, - 0x07ec8001, - 0x07ee4006, - 0x07ee801f, - 0x07f0000f, - 0x07f0d015, - 0x07f29800, - 0x07f33800, - 0x07f36003, - 0x07f3a800, - 0x07f7e803, - 0x07fcf001, - 0x07fdf802, - 0x07fe4001, - 0x07fe8001, - 0x07fec001, - 0x07fee802, - 0x07ff3800, - 0x07ff780c, - 0x07fff001, - 0x08006000, - 0x08013800, - 0x0801d800, - 0x0801f000, 
- 0x08027001, - 0x0802f021, - 0x0807d804, - 0x08081803, - 0x0809a002, - 0x080c7800, - 0x080ce802, - 0x080d082e, - 0x080fe882, - 0x0814e802, - 0x0816880f, - 0x0817e003, - 0x08192008, - 0x081a5804, - 0x081bb009, - 0x081cf000, - 0x081e2003, - 0x081eb029, - 0x0824f001, - 0x08255005, - 0x0826a003, - 0x0827e003, - 0x08294007, - 0x082b200a, - 0x082bd800, - 0x082c5800, - 0x082c9800, - 0x082cb000, - 0x082d1000, - 0x082d9000, - 0x082dd000, - 0x082de842, - 0x0839b808, - 0x083ab009, - 0x083b4017, - 0x083c3000, - 0x083d8800, - 0x083dd844, - 0x08403001, - 0x08404800, - 0x0841b000, - 0x0841c802, - 0x0841e801, - 0x0842b000, - 0x0844f807, - 0x0845802f, - 0x08479800, - 0x0847b004, - 0x0848e002, - 0x0849d004, - 0x084a003f, - 0x084dc003, - 0x084e8001, - 0x0850080e, - 0x0850a000, - 0x0850c000, - 0x0851b009, - 0x08524806, - 0x0852c806, - 0x0855001f, - 0x08572805, - 0x0857b808, - 0x0859b002, - 0x085ab001, - 0x085b9804, - 0x085c9006, - 0x085ce80b, - 0x085d804f, - 0x08624836, - 0x0865980c, - 0x08679806, - 0x0869200b, - 0x0869d125, - 0x0873f800, - 0x08755002, - 0x08757001, - 0x0875904d, - 0x08794007, - 0x087a300a, - 0x087ad015, - 0x087c1003, - 0x087c5025, - 0x087e6013, - 0x087fb808, - 0x08800800, - 0x0881c00e, - 0x08827003, - 0x08838000, - 0x08839801, - 0x0883b00b, - 0x08859803, - 0x0885c801, - 0x0885e800, - 0x0886100d, - 0x08874806, - 0x0887d008, - 0x08893804, - 0x08896808, - 0x088a4007, - 0x088b9800, - 0x088bb80a, - 0x088db008, - 0x088e4803, - 0x088e7800, - 0x088f0000, - 0x088fa80a, - 0x08909000, - 0x08917802, - 0x0891a000, - 0x0891b001, - 0x0891f000, - 0x0892083e, - 0x08943800, - 0x08944800, - 0x08947000, - 0x0894f000, - 0x08955005, - 0x0896f800, - 0x0897180c, - 0x0897d007, - 0x08982000, - 0x08986801, - 0x08988801, - 0x08994800, - 0x08998800, - 0x0899a000, - 0x0899d002, - 0x0899f000, - 0x089a0000, - 0x089a2801, - 0x089a4801, - 0x089a7001, - 0x089a880b, - 0x089b209b, - 0x08a1c007, - 0x08a21002, - 0x08a23000, - 0x08a2e000, - 0x08a2f000, - 0x08a3101d, - 0x08a58000, - 0x08a59805, - 0x08a5d000, - 0x08a5e800, - 0x08a5f801, - 0x08a61001, - 0x08a64007, - 0x08a6d0a5, - 0x08ad7800, - 0x08ad9005, - 0x08ade001, - 0x08adf801, - 0x08aee023, - 0x08b19807, - 0x08b1e800, - 0x08b1f801, - 0x08b2280a, - 0x08b2d005, - 0x08b36812, - 0x08b55800, - 0x08b56800, - 0x08b58005, - 0x08b5b800, - 0x08b5d005, - 0x08b65035, - 0x08b8d804, - 0x08b91003, - 0x08b93808, - 0x08ba38b8, - 0x08c17808, - 0x08c1c801, - 0x08c1e063, - 0x08c7980b, - 0x08c83801, - 0x08c85001, - 0x08c8a000, - 0x08c8b800, - 0x08c98000, - 0x08c9b000, - 0x08c9c803, - 0x08c9f000, - 0x08ca1800, - 0x08ca3808, - 0x08cad045, - 0x08cd4001, - 0x08cea007, - 0x08cf0000, - 0x08cf281a, - 0x08d00809, - 0x08d19805, - 0x08d1d803, - 0x08d23808, - 0x08d28805, - 0x08d2c802, - 0x08d4500c, - 0x08d4c001, - 0x08d5180c, - 0x08d7c806, - 0x08d850f5, - 0x08e04800, - 0x08e1800d, - 0x08e1f800, - 0x08e23009, - 0x08e36802, - 0x08e48018, - 0x08e55006, - 0x08e59001, - 0x08e5a84a, - 0x08e83800, - 0x08e85000, - 0x08e98814, - 0x08ea3808, - 0x08ead005, - 0x08eb3000, - 0x08eb4800, - 0x08ec7803, - 0x08eca800, - 0x08ecb800, - 0x08ecc806, - 0x08ed5135, - 0x08f79801, - 0x08f7c808, - 0x08f88800, - 0x08f9b007, - 0x08fa0000, - 0x08fa1000, - 0x08fad055, - 0x08fd880e, - 0x08ff900c, - 0x091cd065, - 0x09237800, - 0x0923a80a, - 0x092a27ff, - 0x096a224b, - 0x097f980c, - 0x09a18010, - 0x09a23fff, - 0x09e23fb8, - 0x0a323fff, - 0x0a723fff, - 0x0ab23fff, - 0x0af23fff, - 0x0b3239b8, - 0x0b51c806, - 0x0b52f800, - 0x0b535003, - 0x0b55f800, - 0x0b565005, - 0x0b577006, - 0x0b57b009, - 0x0b598006, - 0x0b5a3009, - 0x0b5ad000, - 
0x0b5b1000, - 0x0b5bc004, - 0x0b5c82af, - 0x0b74d864, - 0x0b7a5804, - 0x0b7c400a, - 0x0b7d003f, - 0x0b7f200b, - 0x0b7f900d, - 0x0c3fc007, - 0x0c66b029, - 0x0c684fff, - 0x0ca84fff, - 0x0ce84fff, - 0x0d284fff, - 0x0d684ae6, - 0x0d7fa000, - 0x0d7fe000, - 0x0d7ff800, - 0x0d89180e, - 0x0d89981c, - 0x0d8a9801, - 0x0d8ab00d, - 0x0d8b4007, - 0x0d97e7ff, - 0x0dd7e103, - 0x0de35804, - 0x0de3e802, - 0x0de44806, - 0x0de4d001, - 0x0de4e801, - 0x0de507ff, - 0x0e2507ff, - 0x0e6502af, - 0x0e7e203b, - 0x0e87b009, - 0x0e893801, - 0x0e8b2800, - 0x0e8b3802, - 0x0e8b7014, - 0x0e8c2806, - 0x0e8d5003, - 0x0e8f5814, - 0x0e921002, - 0x0e923079, - 0x0e96a00b, - 0x0e97a00b, - 0x0e9ab808, - 0x0e9bc886, - 0x0ea2a800, - 0x0ea4e800, - 0x0ea50001, - 0x0ea51801, - 0x0ea53801, - 0x0ea56800, - 0x0ea5d000, - 0x0ea5e000, - 0x0ea62000, - 0x0ea83000, - 0x0ea85801, - 0x0ea8a800, - 0x0ea8e800, - 0x0ea9d000, - 0x0ea9f800, - 0x0eaa2800, - 0x0eaa3802, - 0x0eaa8800, - 0x0eb53001, - 0x0ebe6001, - 0x0ed00036, - 0x0ed1d831, - 0x0ed3a800, - 0x0ed42000, - 0x0ed46473, - 0x0ef8f805, - 0x0ef95904, - 0x0f037091, - 0x0f096809, - 0x0f09f001, - 0x0f0a5003, - 0x0f0a813f, - 0x0f157011, - 0x0f176003, - 0x0f17d004, - 0x0f1801cf, - 0x0f276003, - 0x0f27d2e5, - 0x0f3f3800, - 0x0f3f6000, - 0x0f3f7800, - 0x0f3ff800, - 0x0f462801, - 0x0f46802f, - 0x0f4a2006, - 0x0f4a6003, - 0x0f4ad003, - 0x0f4b0310, - 0x0f65a84b, - 0x0f69f0c1, - 0x0f702000, - 0x0f710000, - 0x0f711800, - 0x0f712801, - 0x0f714000, - 0x0f719800, - 0x0f71c000, - 0x0f71d000, - 0x0f71e005, - 0x0f721803, - 0x0f724000, - 0x0f725000, - 0x0f726000, - 0x0f728000, - 0x0f729800, - 0x0f72a801, - 0x0f72c000, - 0x0f72d000, - 0x0f72e000, - 0x0f72f000, - 0x0f730000, - 0x0f731800, - 0x0f732801, - 0x0f735800, - 0x0f739800, - 0x0f73c000, - 0x0f73e800, - 0x0f73f800, - 0x0f745000, - 0x0f74e004, - 0x0f752000, - 0x0f755000, - 0x0f75e033, - 0x0f77910d, - 0x0f816003, - 0x0f84a00b, - 0x0f857801, - 0x0f860000, - 0x0f868000, - 0x0f87b009, - 0x0f8d7037, - 0x0f90180c, - 0x0f91e003, - 0x0f924806, - 0x0f92900d, - 0x0f933099, - 0x0fb6c003, - 0x0fb76802, - 0x0fb7e802, - 0x0fbbb803, - 0x0fbed005, - 0x0fbf6003, - 0x0fbf880e, - 0x0fc06003, - 0x0fc24007, - 0x0fc2d005, - 0x0fc44007, - 0x0fc57001, - 0x0fc5904d, - 0x0fd2a00b, - 0x0fd37001, - 0x0fd3e802, - 0x0fd44806, - 0x0fd5f000, - 0x0fd63007, - 0x0fd6e003, - 0x0fd74806, - 0x0fd7c806, - 0x0fdc9800, - 0x0fde5824, - 0x0fdfd405, - 0x1537001f, - 0x15b9d005, - 0x15c0f001, - 0x1675100d, - 0x175f0fff, - 0x179f0c1e, - 0x17d0f5e1, - 0x189a5804}; - -/// At the end of the valid Unicode code points space a lot of code points are -/// either reserved or a noncharacter. Adding all these entries to the -/// lookup table would add 446 entries to the table (in Unicode 14). -/// Instead the only the start of the region is stored, every code point in -/// this region needs to be escaped. -inline constexpr uint32_t __unallocated_region_lower_bound = 0x000323b0; +/// - bits [0, 13] The size of the range, allowing 16384 elements. +/// - bits [14, 31] The lower bound code point of the range. The upper bound of +/// the range is lower bound + size. Note the code expects code units the fit +/// into 18 bits, instead of the 21 bits needed for the full Unicode range. 
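// [editor's sketch, not part of this patch] A worked decode of the layout
// described above: for the table's second entry, 0x001fc021, the lower bound
// is 0x001fc021 >> 14 == 0x7f and the size is 0x001fc021 & 0x3fff == 0x21, so
// the entry covers [0x7f, 0x7f + 0x21] == [0x7f, 0xa0], i.e. 34 code points,
// matching the "0000007f - 000000a0 [ 34]" annotation below. The helper name
// is hypothetical.
constexpr bool __sketch_entry_contains(uint32_t __entry, char32_t __code_point) {
  uint32_t __lower = __entry >> 14;     // bits [14, 31]: first code point of the range
  uint32_t __size  = __entry & 0x3fffu; // bits [0, 13]: number of additional code points
  return __code_point >= __lower && __code_point <= __lower + __size;
}
static_assert(__sketch_entry_contains(0x001fc021, 0x7f), "first code point of the range");
static_assert(__sketch_entry_contains(0x001fc021, 0xa0), "last code point of the range");
// [end of editor's sketch]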
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[711] = { + 0x00000020 /* 00000000 - 00000020 [ 33] */, + 0x001fc021 /* 0000007f - 000000a0 [ 34] */, + 0x002b4000 /* 000000ad - 000000ad [ 1] */, + 0x00de0001 /* 00000378 - 00000379 [ 2] */, + 0x00e00003 /* 00000380 - 00000383 [ 4] */, + 0x00e2c000 /* 0000038b - 0000038b [ 1] */, + 0x00e34000 /* 0000038d - 0000038d [ 1] */, + 0x00e88000 /* 000003a2 - 000003a2 [ 1] */, + 0x014c0000 /* 00000530 - 00000530 [ 1] */, + 0x0155c001 /* 00000557 - 00000558 [ 2] */, + 0x0162c001 /* 0000058b - 0000058c [ 2] */, + 0x01640000 /* 00000590 - 00000590 [ 1] */, + 0x01720007 /* 000005c8 - 000005cf [ 8] */, + 0x017ac003 /* 000005eb - 000005ee [ 4] */, + 0x017d4010 /* 000005f5 - 00000605 [ 17] */, + 0x01870000 /* 0000061c - 0000061c [ 1] */, + 0x01b74000 /* 000006dd - 000006dd [ 1] */, + 0x01c38001 /* 0000070e - 0000070f [ 2] */, + 0x01d2c001 /* 0000074b - 0000074c [ 2] */, + 0x01ec800d /* 000007b2 - 000007bf [ 14] */, + 0x01fec001 /* 000007fb - 000007fc [ 2] */, + 0x020b8001 /* 0000082e - 0000082f [ 2] */, + 0x020fc000 /* 0000083f - 0000083f [ 1] */, + 0x02170001 /* 0000085c - 0000085d [ 2] */, + 0x0217c000 /* 0000085f - 0000085f [ 1] */, + 0x021ac004 /* 0000086b - 0000086f [ 5] */, + 0x0223c008 /* 0000088f - 00000897 [ 9] */, + 0x02388000 /* 000008e2 - 000008e2 [ 1] */, + 0x02610000 /* 00000984 - 00000984 [ 1] */, + 0x02634001 /* 0000098d - 0000098e [ 2] */, + 0x02644001 /* 00000991 - 00000992 [ 2] */, + 0x026a4000 /* 000009a9 - 000009a9 [ 1] */, + 0x026c4000 /* 000009b1 - 000009b1 [ 1] */, + 0x026cc002 /* 000009b3 - 000009b5 [ 3] */, + 0x026e8001 /* 000009ba - 000009bb [ 2] */, + 0x02714001 /* 000009c5 - 000009c6 [ 2] */, + 0x02724001 /* 000009c9 - 000009ca [ 2] */, + 0x0273c007 /* 000009cf - 000009d6 [ 8] */, + 0x02760003 /* 000009d8 - 000009db [ 4] */, + 0x02778000 /* 000009de - 000009de [ 1] */, + 0x02790001 /* 000009e4 - 000009e5 [ 2] */, + 0x027fc001 /* 000009ff - 00000a00 [ 2] */, + 0x02810000 /* 00000a04 - 00000a04 [ 1] */, + 0x0282c003 /* 00000a0b - 00000a0e [ 4] */, + 0x02844001 /* 00000a11 - 00000a12 [ 2] */, + 0x028a4000 /* 00000a29 - 00000a29 [ 1] */, + 0x028c4000 /* 00000a31 - 00000a31 [ 1] */, + 0x028d0000 /* 00000a34 - 00000a34 [ 1] */, + 0x028dc000 /* 00000a37 - 00000a37 [ 1] */, + 0x028e8001 /* 00000a3a - 00000a3b [ 2] */, + 0x028f4000 /* 00000a3d - 00000a3d [ 1] */, + 0x0290c003 /* 00000a43 - 00000a46 [ 4] */, + 0x02924001 /* 00000a49 - 00000a4a [ 2] */, + 0x02938002 /* 00000a4e - 00000a50 [ 3] */, + 0x02948006 /* 00000a52 - 00000a58 [ 7] */, + 0x02974000 /* 00000a5d - 00000a5d [ 1] */, + 0x0297c006 /* 00000a5f - 00000a65 [ 7] */, + 0x029dc009 /* 00000a77 - 00000a80 [ 10] */, + 0x02a10000 /* 00000a84 - 00000a84 [ 1] */, + 0x02a38000 /* 00000a8e - 00000a8e [ 1] */, + 0x02a48000 /* 00000a92 - 00000a92 [ 1] */, + 0x02aa4000 /* 00000aa9 - 00000aa9 [ 1] */, + 0x02ac4000 /* 00000ab1 - 00000ab1 [ 1] */, + 0x02ad0000 /* 00000ab4 - 00000ab4 [ 1] */, + 0x02ae8001 /* 00000aba - 00000abb [ 2] */, + 0x02b18000 /* 00000ac6 - 00000ac6 [ 1] */, + 0x02b28000 /* 00000aca - 00000aca [ 1] */, + 0x02b38001 /* 00000ace - 00000acf [ 2] */, + 0x02b4400e /* 00000ad1 - 00000adf [ 15] */, + 0x02b90001 /* 00000ae4 - 00000ae5 [ 2] */, + 0x02bc8006 /* 00000af2 - 00000af8 [ 7] */, + 0x02c00000 /* 00000b00 - 00000b00 [ 1] */, + 0x02c10000 /* 00000b04 - 00000b04 [ 1] */, + 0x02c34001 /* 00000b0d - 00000b0e [ 2] */, + 0x02c44001 /* 00000b11 - 00000b12 [ 2] */, + 0x02ca4000 /* 00000b29 - 00000b29 [ 1] */, + 0x02cc4000 /* 00000b31 - 00000b31 [ 1] */, + 0x02cd0000 /* 
00000b34 - 00000b34 [ 1] */, + 0x02ce8001 /* 00000b3a - 00000b3b [ 2] */, + 0x02d14001 /* 00000b45 - 00000b46 [ 2] */, + 0x02d24001 /* 00000b49 - 00000b4a [ 2] */, + 0x02d38006 /* 00000b4e - 00000b54 [ 7] */, + 0x02d60003 /* 00000b58 - 00000b5b [ 4] */, + 0x02d78000 /* 00000b5e - 00000b5e [ 1] */, + 0x02d90001 /* 00000b64 - 00000b65 [ 2] */, + 0x02de0009 /* 00000b78 - 00000b81 [ 10] */, + 0x02e10000 /* 00000b84 - 00000b84 [ 1] */, + 0x02e2c002 /* 00000b8b - 00000b8d [ 3] */, + 0x02e44000 /* 00000b91 - 00000b91 [ 1] */, + 0x02e58002 /* 00000b96 - 00000b98 [ 3] */, + 0x02e6c000 /* 00000b9b - 00000b9b [ 1] */, + 0x02e74000 /* 00000b9d - 00000b9d [ 1] */, + 0x02e80002 /* 00000ba0 - 00000ba2 [ 3] */, + 0x02e94002 /* 00000ba5 - 00000ba7 [ 3] */, + 0x02eac002 /* 00000bab - 00000bad [ 3] */, + 0x02ee8003 /* 00000bba - 00000bbd [ 4] */, + 0x02f0c002 /* 00000bc3 - 00000bc5 [ 3] */, + 0x02f24000 /* 00000bc9 - 00000bc9 [ 1] */, + 0x02f38001 /* 00000bce - 00000bcf [ 2] */, + 0x02f44005 /* 00000bd1 - 00000bd6 [ 6] */, + 0x02f6000d /* 00000bd8 - 00000be5 [ 14] */, + 0x02fec004 /* 00000bfb - 00000bff [ 5] */, + 0x03034000 /* 00000c0d - 00000c0d [ 1] */, + 0x03044000 /* 00000c11 - 00000c11 [ 1] */, + 0x030a4000 /* 00000c29 - 00000c29 [ 1] */, + 0x030e8001 /* 00000c3a - 00000c3b [ 2] */, + 0x03114000 /* 00000c45 - 00000c45 [ 1] */, + 0x03124000 /* 00000c49 - 00000c49 [ 1] */, + 0x03138006 /* 00000c4e - 00000c54 [ 7] */, + 0x0315c000 /* 00000c57 - 00000c57 [ 1] */, + 0x0316c001 /* 00000c5b - 00000c5c [ 2] */, + 0x03178001 /* 00000c5e - 00000c5f [ 2] */, + 0x03190001 /* 00000c64 - 00000c65 [ 2] */, + 0x031c0006 /* 00000c70 - 00000c76 [ 7] */, + 0x03234000 /* 00000c8d - 00000c8d [ 1] */, + 0x03244000 /* 00000c91 - 00000c91 [ 1] */, + 0x032a4000 /* 00000ca9 - 00000ca9 [ 1] */, + 0x032d0000 /* 00000cb4 - 00000cb4 [ 1] */, + 0x032e8001 /* 00000cba - 00000cbb [ 2] */, + 0x03314000 /* 00000cc5 - 00000cc5 [ 1] */, + 0x03324000 /* 00000cc9 - 00000cc9 [ 1] */, + 0x03338006 /* 00000cce - 00000cd4 [ 7] */, + 0x0335c005 /* 00000cd7 - 00000cdc [ 6] */, + 0x0337c000 /* 00000cdf - 00000cdf [ 1] */, + 0x03390001 /* 00000ce4 - 00000ce5 [ 2] */, + 0x033c0000 /* 00000cf0 - 00000cf0 [ 1] */, + 0x033d000b /* 00000cf4 - 00000cff [ 12] */, + 0x03434000 /* 00000d0d - 00000d0d [ 1] */, + 0x03444000 /* 00000d11 - 00000d11 [ 1] */, + 0x03514000 /* 00000d45 - 00000d45 [ 1] */, + 0x03524000 /* 00000d49 - 00000d49 [ 1] */, + 0x03540003 /* 00000d50 - 00000d53 [ 4] */, + 0x03590001 /* 00000d64 - 00000d65 [ 2] */, + 0x03600000 /* 00000d80 - 00000d80 [ 1] */, + 0x03610000 /* 00000d84 - 00000d84 [ 1] */, + 0x0365c002 /* 00000d97 - 00000d99 [ 3] */, + 0x036c8000 /* 00000db2 - 00000db2 [ 1] */, + 0x036f0000 /* 00000dbc - 00000dbc [ 1] */, + 0x036f8001 /* 00000dbe - 00000dbf [ 2] */, + 0x0371c002 /* 00000dc7 - 00000dc9 [ 3] */, + 0x0372c003 /* 00000dcb - 00000dce [ 4] */, + 0x03754000 /* 00000dd5 - 00000dd5 [ 1] */, + 0x0375c000 /* 00000dd7 - 00000dd7 [ 1] */, + 0x03780005 /* 00000de0 - 00000de5 [ 6] */, + 0x037c0001 /* 00000df0 - 00000df1 [ 2] */, + 0x037d400b /* 00000df5 - 00000e00 [ 12] */, + 0x038ec003 /* 00000e3b - 00000e3e [ 4] */, + 0x03970024 /* 00000e5c - 00000e80 [ 37] */, + 0x03a0c000 /* 00000e83 - 00000e83 [ 1] */, + 0x03a14000 /* 00000e85 - 00000e85 [ 1] */, + 0x03a2c000 /* 00000e8b - 00000e8b [ 1] */, + 0x03a90000 /* 00000ea4 - 00000ea4 [ 1] */, + 0x03a98000 /* 00000ea6 - 00000ea6 [ 1] */, + 0x03af8001 /* 00000ebe - 00000ebf [ 2] */, + 0x03b14000 /* 00000ec5 - 00000ec5 [ 1] */, + 0x03b1c000 /* 00000ec7 - 00000ec7 [ 1] */, + 
0x03b3c000 /* 00000ecf - 00000ecf [ 1] */, + 0x03b68001 /* 00000eda - 00000edb [ 2] */, + 0x03b8001f /* 00000ee0 - 00000eff [ 32] */, + 0x03d20000 /* 00000f48 - 00000f48 [ 1] */, + 0x03db4003 /* 00000f6d - 00000f70 [ 4] */, + 0x03e60000 /* 00000f98 - 00000f98 [ 1] */, + 0x03ef4000 /* 00000fbd - 00000fbd [ 1] */, + 0x03f34000 /* 00000fcd - 00000fcd [ 1] */, + 0x03f6c024 /* 00000fdb - 00000fff [ 37] */, + 0x04318000 /* 000010c6 - 000010c6 [ 1] */, + 0x04320004 /* 000010c8 - 000010cc [ 5] */, + 0x04338001 /* 000010ce - 000010cf [ 2] */, + 0x04924000 /* 00001249 - 00001249 [ 1] */, + 0x04938001 /* 0000124e - 0000124f [ 2] */, + 0x0495c000 /* 00001257 - 00001257 [ 1] */, + 0x04964000 /* 00001259 - 00001259 [ 1] */, + 0x04978001 /* 0000125e - 0000125f [ 2] */, + 0x04a24000 /* 00001289 - 00001289 [ 1] */, + 0x04a38001 /* 0000128e - 0000128f [ 2] */, + 0x04ac4000 /* 000012b1 - 000012b1 [ 1] */, + 0x04ad8001 /* 000012b6 - 000012b7 [ 2] */, + 0x04afc000 /* 000012bf - 000012bf [ 1] */, + 0x04b04000 /* 000012c1 - 000012c1 [ 1] */, + 0x04b18001 /* 000012c6 - 000012c7 [ 2] */, + 0x04b5c000 /* 000012d7 - 000012d7 [ 1] */, + 0x04c44000 /* 00001311 - 00001311 [ 1] */, + 0x04c58001 /* 00001316 - 00001317 [ 2] */, + 0x04d6c001 /* 0000135b - 0000135c [ 2] */, + 0x04df4002 /* 0000137d - 0000137f [ 3] */, + 0x04e68005 /* 0000139a - 0000139f [ 6] */, + 0x04fd8001 /* 000013f6 - 000013f7 [ 2] */, + 0x04ff8001 /* 000013fe - 000013ff [ 2] */, + 0x05a00000 /* 00001680 - 00001680 [ 1] */, + 0x05a74002 /* 0000169d - 0000169f [ 3] */, + 0x05be4006 /* 000016f9 - 000016ff [ 7] */, + 0x05c58008 /* 00001716 - 0000171e [ 9] */, + 0x05cdc008 /* 00001737 - 0000173f [ 9] */, + 0x05d5000b /* 00001754 - 0000175f [ 12] */, + 0x05db4000 /* 0000176d - 0000176d [ 1] */, + 0x05dc4000 /* 00001771 - 00001771 [ 1] */, + 0x05dd000b /* 00001774 - 0000177f [ 12] */, + 0x05f78001 /* 000017de - 000017df [ 2] */, + 0x05fa8005 /* 000017ea - 000017ef [ 6] */, + 0x05fe8005 /* 000017fa - 000017ff [ 6] */, + 0x06038000 /* 0000180e - 0000180e [ 1] */, + 0x06068005 /* 0000181a - 0000181f [ 6] */, + 0x061e4006 /* 00001879 - 0000187f [ 7] */, + 0x062ac004 /* 000018ab - 000018af [ 5] */, + 0x063d8009 /* 000018f6 - 000018ff [ 10] */, + 0x0647c000 /* 0000191f - 0000191f [ 1] */, + 0x064b0003 /* 0000192c - 0000192f [ 4] */, + 0x064f0003 /* 0000193c - 0000193f [ 4] */, + 0x06504002 /* 00001941 - 00001943 [ 3] */, + 0x065b8001 /* 0000196e - 0000196f [ 2] */, + 0x065d400a /* 00001975 - 0000197f [ 11] */, + 0x066b0003 /* 000019ac - 000019af [ 4] */, + 0x06728005 /* 000019ca - 000019cf [ 6] */, + 0x0676c002 /* 000019db - 000019dd [ 3] */, + 0x06870001 /* 00001a1c - 00001a1d [ 2] */, + 0x0697c000 /* 00001a5f - 00001a5f [ 1] */, + 0x069f4001 /* 00001a7d - 00001a7e [ 2] */, + 0x06a28005 /* 00001a8a - 00001a8f [ 6] */, + 0x06a68005 /* 00001a9a - 00001a9f [ 6] */, + 0x06ab8001 /* 00001aae - 00001aaf [ 2] */, + 0x06b3c030 /* 00001acf - 00001aff [ 49] */, + 0x06d34002 /* 00001b4d - 00001b4f [ 3] */, + 0x06dfc000 /* 00001b7f - 00001b7f [ 1] */, + 0x06fd0007 /* 00001bf4 - 00001bfb [ 8] */, + 0x070e0002 /* 00001c38 - 00001c3a [ 3] */, + 0x07128002 /* 00001c4a - 00001c4c [ 3] */, + 0x07224006 /* 00001c89 - 00001c8f [ 7] */, + 0x072ec001 /* 00001cbb - 00001cbc [ 2] */, + 0x07320007 /* 00001cc8 - 00001ccf [ 8] */, + 0x073ec004 /* 00001cfb - 00001cff [ 5] */, + 0x07c58001 /* 00001f16 - 00001f17 [ 2] */, + 0x07c78001 /* 00001f1e - 00001f1f [ 2] */, + 0x07d18001 /* 00001f46 - 00001f47 [ 2] */, + 0x07d38001 /* 00001f4e - 00001f4f [ 2] */, + 0x07d60000 /* 00001f58 - 00001f58 [ 
1] */, + 0x07d68000 /* 00001f5a - 00001f5a [ 1] */, + 0x07d70000 /* 00001f5c - 00001f5c [ 1] */, + 0x07d78000 /* 00001f5e - 00001f5e [ 1] */, + 0x07df8001 /* 00001f7e - 00001f7f [ 2] */, + 0x07ed4000 /* 00001fb5 - 00001fb5 [ 1] */, + 0x07f14000 /* 00001fc5 - 00001fc5 [ 1] */, + 0x07f50001 /* 00001fd4 - 00001fd5 [ 2] */, + 0x07f70000 /* 00001fdc - 00001fdc [ 1] */, + 0x07fc0001 /* 00001ff0 - 00001ff1 [ 2] */, + 0x07fd4000 /* 00001ff5 - 00001ff5 [ 1] */, + 0x07ffc010 /* 00001fff - 0000200f [ 17] */, + 0x080a0007 /* 00002028 - 0000202f [ 8] */, + 0x0817c010 /* 0000205f - 0000206f [ 17] */, + 0x081c8001 /* 00002072 - 00002073 [ 2] */, + 0x0823c000 /* 0000208f - 0000208f [ 1] */, + 0x08274002 /* 0000209d - 0000209f [ 3] */, + 0x0830400e /* 000020c1 - 000020cf [ 15] */, + 0x083c400e /* 000020f1 - 000020ff [ 15] */, + 0x08630003 /* 0000218c - 0000218f [ 4] */, + 0x0909c018 /* 00002427 - 0000243f [ 25] */, + 0x0912c014 /* 0000244b - 0000245f [ 21] */, + 0x0add0001 /* 00002b74 - 00002b75 [ 2] */, + 0x0ae58000 /* 00002b96 - 00002b96 [ 1] */, + 0x0b3d0004 /* 00002cf4 - 00002cf8 [ 5] */, + 0x0b498000 /* 00002d26 - 00002d26 [ 1] */, + 0x0b4a0004 /* 00002d28 - 00002d2c [ 5] */, + 0x0b4b8001 /* 00002d2e - 00002d2f [ 2] */, + 0x0b5a0006 /* 00002d68 - 00002d6e [ 7] */, + 0x0b5c400d /* 00002d71 - 00002d7e [ 14] */, + 0x0b65c008 /* 00002d97 - 00002d9f [ 9] */, + 0x0b69c000 /* 00002da7 - 00002da7 [ 1] */, + 0x0b6bc000 /* 00002daf - 00002daf [ 1] */, + 0x0b6dc000 /* 00002db7 - 00002db7 [ 1] */, + 0x0b6fc000 /* 00002dbf - 00002dbf [ 1] */, + 0x0b71c000 /* 00002dc7 - 00002dc7 [ 1] */, + 0x0b73c000 /* 00002dcf - 00002dcf [ 1] */, + 0x0b75c000 /* 00002dd7 - 00002dd7 [ 1] */, + 0x0b77c000 /* 00002ddf - 00002ddf [ 1] */, + 0x0b978021 /* 00002e5e - 00002e7f [ 34] */, + 0x0ba68000 /* 00002e9a - 00002e9a [ 1] */, + 0x0bbd000b /* 00002ef4 - 00002eff [ 12] */, + 0x0bf58019 /* 00002fd6 - 00002fef [ 26] */, + 0x0c000000 /* 00003000 - 00003000 [ 1] */, + 0x0c100000 /* 00003040 - 00003040 [ 1] */, + 0x0c25c001 /* 00003097 - 00003098 [ 2] */, + 0x0c400004 /* 00003100 - 00003104 [ 5] */, + 0x0c4c0000 /* 00003130 - 00003130 [ 1] */, + 0x0c63c000 /* 0000318f - 0000318f [ 1] */, + 0x0c79000a /* 000031e4 - 000031ee [ 11] */, + 0x0c87c000 /* 0000321f - 0000321f [ 1] */, + 0x29234002 /* 0000a48d - 0000a48f [ 3] */, + 0x2931c008 /* 0000a4c7 - 0000a4cf [ 9] */, + 0x298b0013 /* 0000a62c - 0000a63f [ 20] */, + 0x29be0007 /* 0000a6f8 - 0000a6ff [ 8] */, + 0x29f2c004 /* 0000a7cb - 0000a7cf [ 5] */, + 0x29f48000 /* 0000a7d2 - 0000a7d2 [ 1] */, + 0x29f50000 /* 0000a7d4 - 0000a7d4 [ 1] */, + 0x29f68017 /* 0000a7da - 0000a7f1 [ 24] */, + 0x2a0b4002 /* 0000a82d - 0000a82f [ 3] */, + 0x2a0e8005 /* 0000a83a - 0000a83f [ 6] */, + 0x2a1e0007 /* 0000a878 - 0000a87f [ 8] */, + 0x2a318007 /* 0000a8c6 - 0000a8cd [ 8] */, + 0x2a368005 /* 0000a8da - 0000a8df [ 6] */, + 0x2a55000a /* 0000a954 - 0000a95e [ 11] */, + 0x2a5f4002 /* 0000a97d - 0000a97f [ 3] */, + 0x2a738000 /* 0000a9ce - 0000a9ce [ 1] */, + 0x2a768003 /* 0000a9da - 0000a9dd [ 4] */, + 0x2a7fc000 /* 0000a9ff - 0000a9ff [ 1] */, + 0x2a8dc008 /* 0000aa37 - 0000aa3f [ 9] */, + 0x2a938001 /* 0000aa4e - 0000aa4f [ 2] */, + 0x2a968001 /* 0000aa5a - 0000aa5b [ 2] */, + 0x2ab0c017 /* 0000aac3 - 0000aada [ 24] */, + 0x2abdc009 /* 0000aaf7 - 0000ab00 [ 10] */, + 0x2ac1c001 /* 0000ab07 - 0000ab08 [ 2] */, + 0x2ac3c001 /* 0000ab0f - 0000ab10 [ 2] */, + 0x2ac5c008 /* 0000ab17 - 0000ab1f [ 9] */, + 0x2ac9c000 /* 0000ab27 - 0000ab27 [ 1] */, + 0x2acbc000 /* 0000ab2f - 0000ab2f [ 1] */, + 0x2adb0003 /* 
0000ab6c - 0000ab6f [ 4] */, + 0x2afb8001 /* 0000abee - 0000abef [ 2] */, + 0x2afe8005 /* 0000abfa - 0000abff [ 6] */, + 0x35e9000b /* 0000d7a4 - 0000d7af [ 12] */, + 0x35f1c003 /* 0000d7c7 - 0000d7ca [ 4] */, + 0x35ff2103 /* 0000d7fc - 0000f8ff [ 8452] */, + 0x3e9b8001 /* 0000fa6e - 0000fa6f [ 2] */, + 0x3eb68025 /* 0000fada - 0000faff [ 38] */, + 0x3ec1c00b /* 0000fb07 - 0000fb12 [ 12] */, + 0x3ec60004 /* 0000fb18 - 0000fb1c [ 5] */, + 0x3ecdc000 /* 0000fb37 - 0000fb37 [ 1] */, + 0x3ecf4000 /* 0000fb3d - 0000fb3d [ 1] */, + 0x3ecfc000 /* 0000fb3f - 0000fb3f [ 1] */, + 0x3ed08000 /* 0000fb42 - 0000fb42 [ 1] */, + 0x3ed14000 /* 0000fb45 - 0000fb45 [ 1] */, + 0x3ef0c00f /* 0000fbc3 - 0000fbd2 [ 16] */, + 0x3f640001 /* 0000fd90 - 0000fd91 [ 2] */, + 0x3f720006 /* 0000fdc8 - 0000fdce [ 7] */, + 0x3f74001f /* 0000fdd0 - 0000fdef [ 32] */, + 0x3f868005 /* 0000fe1a - 0000fe1f [ 6] */, + 0x3f94c000 /* 0000fe53 - 0000fe53 [ 1] */, + 0x3f99c000 /* 0000fe67 - 0000fe67 [ 1] */, + 0x3f9b0003 /* 0000fe6c - 0000fe6f [ 4] */, + 0x3f9d4000 /* 0000fe75 - 0000fe75 [ 1] */, + 0x3fbf4003 /* 0000fefd - 0000ff00 [ 4] */, + 0x3fefc002 /* 0000ffbf - 0000ffc1 [ 3] */, + 0x3ff20001 /* 0000ffc8 - 0000ffc9 [ 2] */, + 0x3ff40001 /* 0000ffd0 - 0000ffd1 [ 2] */, + 0x3ff60001 /* 0000ffd8 - 0000ffd9 [ 2] */, + 0x3ff74002 /* 0000ffdd - 0000ffdf [ 3] */, + 0x3ff9c000 /* 0000ffe7 - 0000ffe7 [ 1] */, + 0x3ffbc00c /* 0000ffef - 0000fffb [ 13] */, + 0x3fff8001 /* 0000fffe - 0000ffff [ 2] */, + 0x40030000 /* 0001000c - 0001000c [ 1] */, + 0x4009c000 /* 00010027 - 00010027 [ 1] */, + 0x400ec000 /* 0001003b - 0001003b [ 1] */, + 0x400f8000 /* 0001003e - 0001003e [ 1] */, + 0x40138001 /* 0001004e - 0001004f [ 2] */, + 0x40178021 /* 0001005e - 0001007f [ 34] */, + 0x403ec004 /* 000100fb - 000100ff [ 5] */, + 0x4040c003 /* 00010103 - 00010106 [ 4] */, + 0x404d0002 /* 00010134 - 00010136 [ 3] */, + 0x4063c000 /* 0001018f - 0001018f [ 1] */, + 0x40674002 /* 0001019d - 0001019f [ 3] */, + 0x4068402e /* 000101a1 - 000101cf [ 47] */, + 0x407f8081 /* 000101fe - 0001027f [ 130] */, + 0x40a74002 /* 0001029d - 0001029f [ 3] */, + 0x40b4400e /* 000102d1 - 000102df [ 15] */, + 0x40bf0003 /* 000102fc - 000102ff [ 4] */, + 0x40c90008 /* 00010324 - 0001032c [ 9] */, + 0x40d2c004 /* 0001034b - 0001034f [ 5] */, + 0x40dec004 /* 0001037b - 0001037f [ 5] */, + 0x40e78000 /* 0001039e - 0001039e [ 1] */, + 0x40f10003 /* 000103c4 - 000103c7 [ 4] */, + 0x40f58029 /* 000103d6 - 000103ff [ 42] */, + 0x41278001 /* 0001049e - 0001049f [ 2] */, + 0x412a8005 /* 000104aa - 000104af [ 6] */, + 0x41350003 /* 000104d4 - 000104d7 [ 4] */, + 0x413f0003 /* 000104fc - 000104ff [ 4] */, + 0x414a0007 /* 00010528 - 0001052f [ 8] */, + 0x4159000a /* 00010564 - 0001056e [ 11] */, + 0x415ec000 /* 0001057b - 0001057b [ 1] */, + 0x4162c000 /* 0001058b - 0001058b [ 1] */, + 0x4164c000 /* 00010593 - 00010593 [ 1] */, + 0x41658000 /* 00010596 - 00010596 [ 1] */, + 0x41688000 /* 000105a2 - 000105a2 [ 1] */, + 0x416c8000 /* 000105b2 - 000105b2 [ 1] */, + 0x416e8000 /* 000105ba - 000105ba [ 1] */, + 0x416f4042 /* 000105bd - 000105ff [ 67] */, + 0x41cdc008 /* 00010737 - 0001073f [ 9] */, + 0x41d58009 /* 00010756 - 0001075f [ 10] */, + 0x41da0017 /* 00010768 - 0001077f [ 24] */, + 0x41e18000 /* 00010786 - 00010786 [ 1] */, + 0x41ec4000 /* 000107b1 - 000107b1 [ 1] */, + 0x41eec044 /* 000107bb - 000107ff [ 69] */, + 0x42018001 /* 00010806 - 00010807 [ 2] */, + 0x42024000 /* 00010809 - 00010809 [ 1] */, + 0x420d8000 /* 00010836 - 00010836 [ 1] */, + 0x420e4002 /* 00010839 - 0001083b [ 3] 
*/, + 0x420f4001 /* 0001083d - 0001083e [ 2] */, + 0x42158000 /* 00010856 - 00010856 [ 1] */, + 0x4227c007 /* 0001089f - 000108a6 [ 8] */, + 0x422c002f /* 000108b0 - 000108df [ 48] */, + 0x423cc000 /* 000108f3 - 000108f3 [ 1] */, + 0x423d8004 /* 000108f6 - 000108fa [ 5] */, + 0x42470002 /* 0001091c - 0001091e [ 3] */, + 0x424e8004 /* 0001093a - 0001093e [ 5] */, + 0x4250003f /* 00010940 - 0001097f [ 64] */, + 0x426e0003 /* 000109b8 - 000109bb [ 4] */, + 0x42740001 /* 000109d0 - 000109d1 [ 2] */, + 0x42810000 /* 00010a04 - 00010a04 [ 1] */, + 0x4281c004 /* 00010a07 - 00010a0b [ 5] */, + 0x42850000 /* 00010a14 - 00010a14 [ 1] */, + 0x42860000 /* 00010a18 - 00010a18 [ 1] */, + 0x428d8001 /* 00010a36 - 00010a37 [ 2] */, + 0x428ec003 /* 00010a3b - 00010a3e [ 4] */, + 0x42924006 /* 00010a49 - 00010a4f [ 7] */, + 0x42964006 /* 00010a59 - 00010a5f [ 7] */, + 0x42a8001f /* 00010aa0 - 00010abf [ 32] */, + 0x42b9c003 /* 00010ae7 - 00010aea [ 4] */, + 0x42bdc008 /* 00010af7 - 00010aff [ 9] */, + 0x42cd8002 /* 00010b36 - 00010b38 [ 3] */, + 0x42d58001 /* 00010b56 - 00010b57 [ 2] */, + 0x42dcc004 /* 00010b73 - 00010b77 [ 5] */, + 0x42e48006 /* 00010b92 - 00010b98 [ 7] */, + 0x42e7400b /* 00010b9d - 00010ba8 [ 12] */, + 0x42ec004f /* 00010bb0 - 00010bff [ 80] */, + 0x43124036 /* 00010c49 - 00010c7f [ 55] */, + 0x432cc00c /* 00010cb3 - 00010cbf [ 13] */, + 0x433cc006 /* 00010cf3 - 00010cf9 [ 7] */, + 0x434a0007 /* 00010d28 - 00010d2f [ 8] */, + 0x434e8125 /* 00010d3a - 00010e5f [ 294] */, + 0x439fc000 /* 00010e7f - 00010e7f [ 1] */, + 0x43aa8000 /* 00010eaa - 00010eaa [ 1] */, + 0x43ab8001 /* 00010eae - 00010eaf [ 2] */, + 0x43ac804a /* 00010eb2 - 00010efc [ 75] */, + 0x43ca0007 /* 00010f28 - 00010f2f [ 8] */, + 0x43d68015 /* 00010f5a - 00010f6f [ 22] */, + 0x43e28025 /* 00010f8a - 00010faf [ 38] */, + 0x43f30013 /* 00010fcc - 00010fdf [ 20] */, + 0x43fdc008 /* 00010ff7 - 00010fff [ 9] */, + 0x44138003 /* 0001104e - 00011051 [ 4] */, + 0x441d8008 /* 00011076 - 0001107e [ 9] */, + 0x442f4000 /* 000110bd - 000110bd [ 1] */, + 0x4430c00c /* 000110c3 - 000110cf [ 13] */, + 0x443a4006 /* 000110e9 - 000110ef [ 7] */, + 0x443e8005 /* 000110fa - 000110ff [ 6] */, + 0x444d4000 /* 00011135 - 00011135 [ 1] */, + 0x44520007 /* 00011148 - 0001114f [ 8] */, + 0x445dc008 /* 00011177 - 0001117f [ 9] */, + 0x44780000 /* 000111e0 - 000111e0 [ 1] */, + 0x447d400a /* 000111f5 - 000111ff [ 11] */, + 0x44848000 /* 00011212 - 00011212 [ 1] */, + 0x4490803d /* 00011242 - 0001127f [ 62] */, + 0x44a1c000 /* 00011287 - 00011287 [ 1] */, + 0x44a24000 /* 00011289 - 00011289 [ 1] */, + 0x44a38000 /* 0001128e - 0001128e [ 1] */, + 0x44a78000 /* 0001129e - 0001129e [ 1] */, + 0x44aa8005 /* 000112aa - 000112af [ 6] */, + 0x44bac004 /* 000112eb - 000112ef [ 5] */, + 0x44be8005 /* 000112fa - 000112ff [ 6] */, + 0x44c10000 /* 00011304 - 00011304 [ 1] */, + 0x44c34001 /* 0001130d - 0001130e [ 2] */, + 0x44c44001 /* 00011311 - 00011312 [ 2] */, + 0x44ca4000 /* 00011329 - 00011329 [ 1] */, + 0x44cc4000 /* 00011331 - 00011331 [ 1] */, + 0x44cd0000 /* 00011334 - 00011334 [ 1] */, + 0x44ce8000 /* 0001133a - 0001133a [ 1] */, + 0x44d14001 /* 00011345 - 00011346 [ 2] */, + 0x44d24001 /* 00011349 - 0001134a [ 2] */, + 0x44d38001 /* 0001134e - 0001134f [ 2] */, + 0x44d44005 /* 00011351 - 00011356 [ 6] */, + 0x44d60004 /* 00011358 - 0001135c [ 5] */, + 0x44d90001 /* 00011364 - 00011365 [ 2] */, + 0x44db4002 /* 0001136d - 0001136f [ 3] */, + 0x44dd408a /* 00011375 - 000113ff [ 139] */, + 0x45170000 /* 0001145c - 0001145c [ 1] */, + 0x4518801d /* 
00011462 - 0001147f [ 30] */, + 0x45320007 /* 000114c8 - 000114cf [ 8] */, + 0x453680a5 /* 000114da - 0001157f [ 166] */, + 0x456d8001 /* 000115b6 - 000115b7 [ 2] */, + 0x45778021 /* 000115de - 000115ff [ 34] */, + 0x4591400a /* 00011645 - 0001164f [ 11] */, + 0x45968005 /* 0001165a - 0001165f [ 6] */, + 0x459b4012 /* 0001166d - 0001167f [ 19] */, + 0x45ae8005 /* 000116ba - 000116bf [ 6] */, + 0x45b28035 /* 000116ca - 000116ff [ 54] */, + 0x45c6c001 /* 0001171b - 0001171c [ 2] */, + 0x45cb0003 /* 0001172c - 0001172f [ 4] */, + 0x45d1c0b8 /* 00011747 - 000117ff [ 185] */, + 0x460f0063 /* 0001183c - 0001189f [ 100] */, + 0x463cc00b /* 000118f3 - 000118fe [ 12] */, + 0x4641c001 /* 00011907 - 00011908 [ 2] */, + 0x46428001 /* 0001190a - 0001190b [ 2] */, + 0x46450000 /* 00011914 - 00011914 [ 1] */, + 0x4645c000 /* 00011917 - 00011917 [ 1] */, + 0x464d8000 /* 00011936 - 00011936 [ 1] */, + 0x464e4001 /* 00011939 - 0001193a [ 2] */, + 0x4651c008 /* 00011947 - 0001194f [ 9] */, + 0x46568045 /* 0001195a - 0001199f [ 70] */, + 0x466a0001 /* 000119a8 - 000119a9 [ 2] */, + 0x46760001 /* 000119d8 - 000119d9 [ 2] */, + 0x4679401a /* 000119e5 - 000119ff [ 27] */, + 0x46920007 /* 00011a48 - 00011a4f [ 8] */, + 0x46a8c00c /* 00011aa3 - 00011aaf [ 13] */, + 0x46be4006 /* 00011af9 - 00011aff [ 7] */, + 0x46c280f5 /* 00011b0a - 00011bff [ 246] */, + 0x47024000 /* 00011c09 - 00011c09 [ 1] */, + 0x470dc000 /* 00011c37 - 00011c37 [ 1] */, + 0x47118009 /* 00011c46 - 00011c4f [ 10] */, + 0x471b4002 /* 00011c6d - 00011c6f [ 3] */, + 0x47240001 /* 00011c90 - 00011c91 [ 2] */, + 0x472a0000 /* 00011ca8 - 00011ca8 [ 1] */, + 0x472dc048 /* 00011cb7 - 00011cff [ 73] */, + 0x4741c000 /* 00011d07 - 00011d07 [ 1] */, + 0x47428000 /* 00011d0a - 00011d0a [ 1] */, + 0x474dc002 /* 00011d37 - 00011d39 [ 3] */, + 0x474ec000 /* 00011d3b - 00011d3b [ 1] */, + 0x474f8000 /* 00011d3e - 00011d3e [ 1] */, + 0x47520007 /* 00011d48 - 00011d4f [ 8] */, + 0x47568005 /* 00011d5a - 00011d5f [ 6] */, + 0x47598000 /* 00011d66 - 00011d66 [ 1] */, + 0x475a4000 /* 00011d69 - 00011d69 [ 1] */, + 0x4763c000 /* 00011d8f - 00011d8f [ 1] */, + 0x47648000 /* 00011d92 - 00011d92 [ 1] */, + 0x47664006 /* 00011d99 - 00011d9f [ 7] */, + 0x476a8135 /* 00011daa - 00011edf [ 310] */, + 0x47be4006 /* 00011ef9 - 00011eff [ 7] */, + 0x47c44000 /* 00011f11 - 00011f11 [ 1] */, + 0x47cec002 /* 00011f3b - 00011f3d [ 3] */, + 0x47d68055 /* 00011f5a - 00011faf [ 86] */, + 0x47ec400e /* 00011fb1 - 00011fbf [ 15] */, + 0x47fc800c /* 00011ff2 - 00011ffe [ 13] */, + 0x48e68065 /* 0001239a - 000123ff [ 102] */, + 0x491bc000 /* 0001246f - 0001246f [ 1] */, + 0x491d400a /* 00012475 - 0001247f [ 11] */, + 0x49510a4b /* 00012544 - 00012f8f [ 2636] */, + 0x4bfcc00c /* 00012ff3 - 00012fff [ 13] */, + 0x4d0c000f /* 00013430 - 0001343f [ 16] */, + 0x4d158fa9 /* 00013456 - 000143ff [ 4010] */, + 0x5191e1b8 /* 00014647 - 000167ff [ 8633] */, + 0x5a8e4006 /* 00016a39 - 00016a3f [ 7] */, + 0x5a97c000 /* 00016a5f - 00016a5f [ 1] */, + 0x5a9a8003 /* 00016a6a - 00016a6d [ 4] */, + 0x5aafc000 /* 00016abf - 00016abf [ 1] */, + 0x5ab28005 /* 00016aca - 00016acf [ 6] */, + 0x5abb8001 /* 00016aee - 00016aef [ 2] */, + 0x5abd8009 /* 00016af6 - 00016aff [ 10] */, + 0x5ad18009 /* 00016b46 - 00016b4f [ 10] */, + 0x5ad68000 /* 00016b5a - 00016b5a [ 1] */, + 0x5ad88000 /* 00016b62 - 00016b62 [ 1] */, + 0x5ade0004 /* 00016b78 - 00016b7c [ 5] */, + 0x5ae402af /* 00016b90 - 00016e3f [ 688] */, + 0x5ba6c064 /* 00016e9b - 00016eff [ 101] */, + 0x5bd2c003 /* 00016f4b - 00016f4e [ 4] */, + 0x5be20006 /* 
00016f88 - 00016f8e [ 7] */, + 0x5be8003f /* 00016fa0 - 00016fdf [ 64] */, + 0x5bf9400a /* 00016fe5 - 00016fef [ 11] */, + 0x5bfc800d /* 00016ff2 - 00016fff [ 14] */, + 0x61fe0007 /* 000187f8 - 000187ff [ 8] */, + 0x63358029 /* 00018cd6 - 00018cff [ 42] */, + 0x634262e6 /* 00018d09 - 0001afef [ 8935] */, + 0x6bfd0000 /* 0001aff4 - 0001aff4 [ 1] */, + 0x6bff0000 /* 0001affc - 0001affc [ 1] */, + 0x6bffc000 /* 0001afff - 0001afff [ 1] */, + 0x6c48c00e /* 0001b123 - 0001b131 [ 15] */, + 0x6c4cc01c /* 0001b133 - 0001b14f [ 29] */, + 0x6c54c001 /* 0001b153 - 0001b154 [ 2] */, + 0x6c55800d /* 0001b156 - 0001b163 [ 14] */, + 0x6c5a0007 /* 0001b168 - 0001b16f [ 8] */, + 0x6cbf0903 /* 0001b2fc - 0001bbff [ 2308] */, + 0x6f1ac004 /* 0001bc6b - 0001bc6f [ 5] */, + 0x6f1f4002 /* 0001bc7d - 0001bc7f [ 3] */, + 0x6f224006 /* 0001bc89 - 0001bc8f [ 7] */, + 0x6f268001 /* 0001bc9a - 0001bc9b [ 2] */, + 0x6f28125f /* 0001bca0 - 0001ceff [ 4704] */, + 0x73cb8001 /* 0001cf2e - 0001cf2f [ 2] */, + 0x73d1c008 /* 0001cf47 - 0001cf4f [ 9] */, + 0x73f1003b /* 0001cfc4 - 0001cfff [ 60] */, + 0x743d8009 /* 0001d0f6 - 0001d0ff [ 10] */, + 0x7449c001 /* 0001d127 - 0001d128 [ 2] */, + 0x745cc007 /* 0001d173 - 0001d17a [ 8] */, + 0x747ac014 /* 0001d1eb - 0001d1ff [ 21] */, + 0x74918079 /* 0001d246 - 0001d2bf [ 122] */, + 0x74b5000b /* 0001d2d4 - 0001d2df [ 12] */, + 0x74bd000b /* 0001d2f4 - 0001d2ff [ 12] */, + 0x74d5c008 /* 0001d357 - 0001d35f [ 9] */, + 0x74de4086 /* 0001d379 - 0001d3ff [ 135] */, + 0x75154000 /* 0001d455 - 0001d455 [ 1] */, + 0x75274000 /* 0001d49d - 0001d49d [ 1] */, + 0x75280001 /* 0001d4a0 - 0001d4a1 [ 2] */, + 0x7528c001 /* 0001d4a3 - 0001d4a4 [ 2] */, + 0x7529c001 /* 0001d4a7 - 0001d4a8 [ 2] */, + 0x752b4000 /* 0001d4ad - 0001d4ad [ 1] */, + 0x752e8000 /* 0001d4ba - 0001d4ba [ 1] */, + 0x752f0000 /* 0001d4bc - 0001d4bc [ 1] */, + 0x75310000 /* 0001d4c4 - 0001d4c4 [ 1] */, + 0x75418000 /* 0001d506 - 0001d506 [ 1] */, + 0x7542c001 /* 0001d50b - 0001d50c [ 2] */, + 0x75454000 /* 0001d515 - 0001d515 [ 1] */, + 0x75474000 /* 0001d51d - 0001d51d [ 1] */, + 0x754e8000 /* 0001d53a - 0001d53a [ 1] */, + 0x754fc000 /* 0001d53f - 0001d53f [ 1] */, + 0x75514000 /* 0001d545 - 0001d545 [ 1] */, + 0x7551c002 /* 0001d547 - 0001d549 [ 3] */, + 0x75544000 /* 0001d551 - 0001d551 [ 1] */, + 0x75a98001 /* 0001d6a6 - 0001d6a7 [ 2] */, + 0x75f30001 /* 0001d7cc - 0001d7cd [ 2] */, + 0x76a3000e /* 0001da8c - 0001da9a [ 15] */, + 0x76a80000 /* 0001daa0 - 0001daa0 [ 1] */, + 0x76ac044f /* 0001dab0 - 0001deff [ 1104] */, + 0x77c7c005 /* 0001df1f - 0001df24 [ 6] */, + 0x77cac0d4 /* 0001df2b - 0001dfff [ 213] */, + 0x7801c000 /* 0001e007 - 0001e007 [ 1] */, + 0x78064001 /* 0001e019 - 0001e01a [ 2] */, + 0x78088000 /* 0001e022 - 0001e022 [ 1] */, + 0x78094000 /* 0001e025 - 0001e025 [ 1] */, + 0x780ac004 /* 0001e02b - 0001e02f [ 5] */, + 0x781b8020 /* 0001e06e - 0001e08e [ 33] */, + 0x7824006f /* 0001e090 - 0001e0ff [ 112] */, + 0x784b4002 /* 0001e12d - 0001e12f [ 3] */, + 0x784f8001 /* 0001e13e - 0001e13f [ 2] */, + 0x78528003 /* 0001e14a - 0001e14d [ 4] */, + 0x7854013f /* 0001e150 - 0001e28f [ 320] */, + 0x78abc010 /* 0001e2af - 0001e2bf [ 17] */, + 0x78be8004 /* 0001e2fa - 0001e2fe [ 5] */, + 0x78c001cf /* 0001e300 - 0001e4cf [ 464] */, + 0x793e82e5 /* 0001e4fa - 0001e7df [ 742] */, + 0x79f9c000 /* 0001e7e7 - 0001e7e7 [ 1] */, + 0x79fb0000 /* 0001e7ec - 0001e7ec [ 1] */, + 0x79fbc000 /* 0001e7ef - 0001e7ef [ 1] */, + 0x79ffc000 /* 0001e7ff - 0001e7ff [ 1] */, + 0x7a314001 /* 0001e8c5 - 0001e8c6 [ 2] */, + 0x7a35c028 /* 
0001e8d7 - 0001e8ff [ 41] */, + 0x7a530003 /* 0001e94c - 0001e94f [ 4] */, + 0x7a568003 /* 0001e95a - 0001e95d [ 4] */, + 0x7a580310 /* 0001e960 - 0001ec70 [ 785] */, + 0x7b2d404b /* 0001ecb5 - 0001ed00 [ 76] */, + 0x7b4f80c1 /* 0001ed3e - 0001edff [ 194] */, + 0x7b810000 /* 0001ee04 - 0001ee04 [ 1] */, + 0x7b880000 /* 0001ee20 - 0001ee20 [ 1] */, + 0x7b88c000 /* 0001ee23 - 0001ee23 [ 1] */, + 0x7b894001 /* 0001ee25 - 0001ee26 [ 2] */, + 0x7b8a0000 /* 0001ee28 - 0001ee28 [ 1] */, + 0x7b8cc000 /* 0001ee33 - 0001ee33 [ 1] */, + 0x7b8e0000 /* 0001ee38 - 0001ee38 [ 1] */, + 0x7b8e8000 /* 0001ee3a - 0001ee3a [ 1] */, + 0x7b8f0005 /* 0001ee3c - 0001ee41 [ 6] */, + 0x7b90c003 /* 0001ee43 - 0001ee46 [ 4] */, + 0x7b920000 /* 0001ee48 - 0001ee48 [ 1] */, + 0x7b928000 /* 0001ee4a - 0001ee4a [ 1] */, + 0x7b930000 /* 0001ee4c - 0001ee4c [ 1] */, + 0x7b940000 /* 0001ee50 - 0001ee50 [ 1] */, + 0x7b94c000 /* 0001ee53 - 0001ee53 [ 1] */, + 0x7b954001 /* 0001ee55 - 0001ee56 [ 2] */, + 0x7b960000 /* 0001ee58 - 0001ee58 [ 1] */, + 0x7b968000 /* 0001ee5a - 0001ee5a [ 1] */, + 0x7b970000 /* 0001ee5c - 0001ee5c [ 1] */, + 0x7b978000 /* 0001ee5e - 0001ee5e [ 1] */, + 0x7b980000 /* 0001ee60 - 0001ee60 [ 1] */, + 0x7b98c000 /* 0001ee63 - 0001ee63 [ 1] */, + 0x7b994001 /* 0001ee65 - 0001ee66 [ 2] */, + 0x7b9ac000 /* 0001ee6b - 0001ee6b [ 1] */, + 0x7b9cc000 /* 0001ee73 - 0001ee73 [ 1] */, + 0x7b9e0000 /* 0001ee78 - 0001ee78 [ 1] */, + 0x7b9f4000 /* 0001ee7d - 0001ee7d [ 1] */, + 0x7b9fc000 /* 0001ee7f - 0001ee7f [ 1] */, + 0x7ba28000 /* 0001ee8a - 0001ee8a [ 1] */, + 0x7ba70004 /* 0001ee9c - 0001eea0 [ 5] */, + 0x7ba90000 /* 0001eea4 - 0001eea4 [ 1] */, + 0x7baa8000 /* 0001eeaa - 0001eeaa [ 1] */, + 0x7baf0033 /* 0001eebc - 0001eeef [ 52] */, + 0x7bbc810d /* 0001eef2 - 0001efff [ 270] */, + 0x7c0b0003 /* 0001f02c - 0001f02f [ 4] */, + 0x7c25000b /* 0001f094 - 0001f09f [ 12] */, + 0x7c2bc001 /* 0001f0af - 0001f0b0 [ 2] */, + 0x7c300000 /* 0001f0c0 - 0001f0c0 [ 1] */, + 0x7c340000 /* 0001f0d0 - 0001f0d0 [ 1] */, + 0x7c3d8009 /* 0001f0f6 - 0001f0ff [ 10] */, + 0x7c6b8037 /* 0001f1ae - 0001f1e5 [ 56] */, + 0x7c80c00c /* 0001f203 - 0001f20f [ 13] */, + 0x7c8f0003 /* 0001f23c - 0001f23f [ 4] */, + 0x7c924006 /* 0001f249 - 0001f24f [ 7] */, + 0x7c94800d /* 0001f252 - 0001f25f [ 14] */, + 0x7c998099 /* 0001f266 - 0001f2ff [ 154] */, + 0x7db60003 /* 0001f6d8 - 0001f6db [ 4] */, + 0x7dbb4002 /* 0001f6ed - 0001f6ef [ 3] */, + 0x7dbf4002 /* 0001f6fd - 0001f6ff [ 3] */, + 0x7dddc003 /* 0001f777 - 0001f77a [ 4] */, + 0x7df68005 /* 0001f7da - 0001f7df [ 6] */, + 0x7dfb0003 /* 0001f7ec - 0001f7ef [ 4] */, + 0x7dfc400e /* 0001f7f1 - 0001f7ff [ 15] */, + 0x7e030003 /* 0001f80c - 0001f80f [ 4] */, + 0x7e120007 /* 0001f848 - 0001f84f [ 8] */, + 0x7e168005 /* 0001f85a - 0001f85f [ 6] */, + 0x7e220007 /* 0001f888 - 0001f88f [ 8] */, + 0x7e2b8001 /* 0001f8ae - 0001f8af [ 2] */, + 0x7e2c804d /* 0001f8b2 - 0001f8ff [ 78] */, + 0x7e95000b /* 0001fa54 - 0001fa5f [ 12] */, + 0x7e9b8001 /* 0001fa6e - 0001fa6f [ 2] */, + 0x7e9f4002 /* 0001fa7d - 0001fa7f [ 3] */, + 0x7ea24006 /* 0001fa89 - 0001fa8f [ 7] */, + 0x7eaf8000 /* 0001fabe - 0001fabe [ 1] */, + 0x7eb18007 /* 0001fac6 - 0001facd [ 8] */, + 0x7eb70003 /* 0001fadc - 0001fadf [ 4] */, + 0x7eba4006 /* 0001fae9 - 0001faef [ 7] */, + 0x7ebe4006 /* 0001faf9 - 0001faff [ 7] */, + 0x7ee4c000 /* 0001fb93 - 0001fb93 [ 1] */, + 0x7ef2c024 /* 0001fbcb - 0001fbef [ 37] */, + 0x7efe8405 /* 0001fbfa - 0001ffff [ 1030] */, + 0xa9b8001f /* 0002a6e0 - 0002a6ff [ 32] */, + 0xadce8005 /* 0002b73a - 0002b73f 
[ 6] */, + 0xae078001 /* 0002b81e - 0002b81f [ 2] */, + 0xb3a8800d /* 0002cea2 - 0002ceaf [ 14] */, + 0xbaf8400e /* 0002ebe1 - 0002ebef [ 15] */, + 0xbb9789a1 /* 0002ee5e - 0002f7ff [ 2466] */, + 0xbe8785e1 /* 0002fa1e - 0002ffff [ 1506] */, + 0xc4d2c004 /* 0003134b - 0003134f [ 5] */}; /// Returns whether the code unit needs to be escaped. /// -/// \pre The code point is a valid Unicode code point. +/// Near the end of the valid Unicode code point space, many code points are +/// either reserved or noncharacters. Adding all these entries to the +/// lookup table would greatly increase its size. Instead these +/// entries are handled manually. Within this large area of reserved code points +/// there is a small area of extended graphemes that should not be escaped +/// unconditionally. This is also handled manually. See the generation script for +/// more details. + +/// +/// \pre The code point is a valid Unicode code point. [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool __needs_escape(const char32_t __code_point) noexcept { - // Since __unallocated_region_lower_bound contains the unshifted range do the - // comparison without shifting. - if (__code_point >= __unallocated_region_lower_bound) + + // The entries in the gap at the end. + if (__code_point >= 0x000e0100 && __code_point <= 0x000e01ef) + return false; + + // The entries at the end. + if (__code_point >= 0x000323b0) return true; - ptrdiff_t __i = std::ranges::upper_bound(__entries, (__code_point << 11) | 0x7ffu) - __entries; + ptrdiff_t __i = std::ranges::upper_bound(__entries, (__code_point << 14) | 0x3fffu) - __entries; if (__i == 0) return false; --__i; - uint32_t __upper_bound = (__entries[__i] >> 11) + (__entries[__i] & 0x7ffu); + uint32_t __upper_bound = (__entries[__i] >> 14) + (__entries[__i] & 0x3fffu); return __code_point <= __upper_bound; } diff --git a/lib/libcxx/include/__format/extended_grapheme_cluster_table.h b/lib/libcxx/include/__format/extended_grapheme_cluster_table.h index 9616dfecd6..48581d8a5d 100644 --- a/lib/libcxx/include/__format/extended_grapheme_cluster_table.h +++ b/lib/libcxx/include/__format/extended_grapheme_cluster_table.h @@ -125,7 +125,7 @@ enum class __property : uint8_t { /// following benchmark. /// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp // clang-format off -inline constexpr uint32_t __entries[1496] = { +_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[1496] = { 0x00000091, 0x00005005, 0x00005811, diff --git a/lib/libcxx/include/__format/format_arg.h b/lib/libcxx/include/__format/format_arg.h index 34ed9bcd6d..aa02f81dc4 100644 --- a/lib/libcxx/include/__format/format_arg.h +++ b/lib/libcxx/include/__format/format_arg.h @@ -14,11 +14,12 @@ #include <__concepts/arithmetic.h> #include <__config> #include <__format/concepts.h> -#include <__format/format_fwd.h> #include <__format/format_parse_context.h> #include <__functional/invoke.h> +#include <__fwd/format.h> #include <__memory/addressof.h> #include <__type_traits/conditional.h> +#include <__type_traits/remove_const.h> #include <__utility/forward.h> #include <__utility/move.h> #include <__utility/unreachable.h> @@ -96,7 +97,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr __arg_t __get_packed_type(uint64_t __types, size } // namespace __format -// This function is not user obervable, so it can directly use the non-standard +// This function is not user observable, so it can directly use the non-standard // types of the "variant". See __arg_t for more details.
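// A standalone sketch (not part of the diff) of the packed-range lookup that
// __needs_escape above performs. Assumption: the entry encoding is
// (lower_bound << 14) | (range_size - 1), as implied by the table entries and
// the shift/mask in this hunk; all names below are illustrative.
#include <algorithm>
#include <cstdint>
#include <iterator>

namespace sketch {
// Two real ranges from the table: 0000abee-0000abef [2] and 0000abfa-0000abff [6].
inline constexpr uint32_t entries[] = {
    (0xabeeu << 14) | (2u - 1u), // 0x2afb8001 in the generated table
    (0xabfau << 14) | (6u - 1u), // 0x2afe8005 in the generated table
};

constexpr bool needs_escape(char32_t code_point) {
  // Searching with all 14 size bits set makes a code point equal to a range's
  // lower bound sort after that range's entry, so stepping back one entry
  // always lands on the candidate range.
  auto it = std::ranges::upper_bound(entries, (static_cast<uint32_t>(code_point) << 14) | 0x3fffu);
  if (it == std::begin(entries))
    return false;
  --it;
  uint32_t upper_bound = (*it >> 14) + (*it & 0x3fffu);
  return code_point <= upper_bound;
}

static_assert(needs_escape(U'\xabee') && needs_escape(U'\xabef')); // first range
static_assert(!needs_escape(U'\xabf0'));                           // gap between the ranges
static_assert(needs_escape(U'\xabfa') && needs_escape(U'\xabff')); // second range
} // namespace sketch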
template _LIBCPP_HIDE_FROM_ABI decltype(auto) __visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { @@ -147,6 +148,59 @@ _LIBCPP_HIDE_FROM_ABI decltype(auto) __visit_format_arg(_Visitor&& __vis, basic_ __libcpp_unreachable(); } +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + +template +_LIBCPP_HIDE_FROM_ABI _Rp __visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { + switch (__arg.__type_) { + case __format::__arg_t::__none: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__monostate_); + case __format::__arg_t::__boolean: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__boolean_); + case __format::__arg_t::__char_type: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__char_type_); + case __format::__arg_t::__int: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__int_); + case __format::__arg_t::__long_long: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__long_long_); + case __format::__arg_t::__i128: +# ifndef _LIBCPP_HAS_NO_INT128 + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__i128_); +# else + __libcpp_unreachable(); +# endif + case __format::__arg_t::__unsigned: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__unsigned_); + case __format::__arg_t::__unsigned_long_long: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__unsigned_long_long_); + case __format::__arg_t::__u128: +# ifndef _LIBCPP_HAS_NO_INT128 + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__u128_); +# else + __libcpp_unreachable(); +# endif + case __format::__arg_t::__float: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__float_); + case __format::__arg_t::__double: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__double_); + case __format::__arg_t::__long_double: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__long_double_); + case __format::__arg_t::__const_char_type_ptr: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__const_char_type_ptr_); + case __format::__arg_t::__string_view: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__string_view_); + case __format::__arg_t::__ptr: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__ptr_); + case __format::__arg_t::__handle: + return std::invoke_r<_Rp>( + std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__arg.__value_.__handle_}); + } + + __libcpp_unreachable(); +} + +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + /// Contains the values used in basic_format_arg. /// /// This is a separate type so it's possible to store the values and types in @@ -230,6 +284,52 @@ public: _LIBCPP_HIDE_FROM_ABI explicit operator bool() const noexcept { return __type_ != __format::__arg_t::__none; } +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + + // This function is user facing, so it must wrap the non-standard types of + // the "variant" in a handle to stay conforming. See __arg_t for more details. 
+ template <class _Visitor> + _LIBCPP_HIDE_FROM_ABI decltype(auto) visit(this basic_format_arg __arg, _Visitor&& __vis) { + switch (__arg.__type_) { +# ifndef _LIBCPP_HAS_NO_INT128 + case __format::__arg_t::__i128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__i128_}; + return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } + + case __format::__arg_t::__u128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_}; + return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } +# endif + default: + return std::__visit_format_arg(std::forward<_Visitor>(__vis), __arg); + } + } + + // This function is user facing, so it must wrap the non-standard types of + // the "variant" in a handle to stay conforming. See __arg_t for more details. + template <class _Rp, class _Visitor> + _LIBCPP_HIDE_FROM_ABI _Rp visit(this basic_format_arg __arg, _Visitor&& __vis) { + switch (__arg.__type_) { +# ifndef _LIBCPP_HAS_NO_INT128 + case __format::__arg_t::__i128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__i128_}; + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } + + case __format::__arg_t::__u128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_}; + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } +# endif + default: + return std::__visit_format_arg<_Rp>(std::forward<_Visitor>(__vis), __arg); + } + } + +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + private: using char_type = typename _Context::char_type; @@ -270,7 +370,11 @@ private: // This function is user facing, so it must wrap the non-standard types of // the "variant" in a handle to stay conforming. See __arg_t for more details. template <class _Visitor, class _Context> -_LIBCPP_HIDE_FROM_ABI decltype(auto) visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) +_LIBCPP_DEPRECATED_IN_CXX26 +# endif + _LIBCPP_HIDE_FROM_ABI decltype(auto) + visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { switch (__arg.__type_) { # ifndef _LIBCPP_HAS_NO_INT128 case __format::__arg_t::__i128: { @@ -282,7 +386,7 @@ _LIBCPP_HIDE_FROM_ABI decltype(auto) visit_format_arg(_Visitor&& __vis, basic_fo typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_}; return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); } -# endif +# endif // !_LIBCPP_HAS_NO_INT128 default: return std::__visit_format_arg(std::forward<_Visitor>(__vis), __arg); } diff --git a/lib/libcxx/include/__format/format_arg_store.h b/lib/libcxx/include/__format/format_arg_store.h index 066cd369eb..23a599e995 100644 --- a/lib/libcxx/include/__format/format_arg_store.h +++ b/lib/libcxx/include/__format/format_arg_store.h @@ -151,7 +151,7 @@ consteval __arg_t __determine_arg_t() { // The overload for not formattable types allows triggering the static // assertion below.
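// A simplified standalone model of the pattern described above; the names and
// the two supported categories are illustrative, not libc++'s. Unsupported
// types map to a sentinel enumerator so a single static_assert emits the
// "not formattable" diagnostic.
#include <string_view>
#include <type_traits>

namespace sketch {
enum class arg_type { none, integer, string };

template <class T>
consteval arg_type determine_arg_type() {
  if constexpr (std::is_integral_v<T>)
    return arg_type::integer;
  else if constexpr (std::is_convertible_v<T, std::string_view>)
    return arg_type::string;
  else
    return arg_type::none; // the role of the constrained fallback overload
}

template <class T>
constexpr void check_formattable() {
  constexpr arg_type type = determine_arg_type<T>();
  static_assert(type != arg_type::none, "the supplied type is not formattable");
}
} // namespace sketch
// sketch::check_formattable<int>() compiles; instantiating it with an
// unsupported type trips the static_assert with the message above.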
template - requires(!__formattable<_Tp, typename _Context::char_type>) + requires(!__formattable_with<_Tp, _Context>) consteval __arg_t __determine_arg_t() { return __arg_t::__none; } @@ -165,7 +165,6 @@ _LIBCPP_HIDE_FROM_ABI basic_format_arg<_Context> __create_format_arg(_Tp& __valu using _Dp = remove_const_t<_Tp>; constexpr __arg_t __arg = __determine_arg_t<_Context, _Dp>(); static_assert(__arg != __arg_t::__none, "the supplied type is not formattable"); - static_assert(__formattable_with<_Tp, _Context>); // Not all types can be used to directly initialize the diff --git a/lib/libcxx/include/__format/format_args.h b/lib/libcxx/include/__format/format_args.h index 9e0afecc0a..07923570f3 100644 --- a/lib/libcxx/include/__format/format_args.h +++ b/lib/libcxx/include/__format/format_args.h @@ -10,11 +10,10 @@ #ifndef _LIBCPP___FORMAT_FORMAT_ARGS_H #define _LIBCPP___FORMAT_FORMAT_ARGS_H -#include <__availability> #include <__config> #include <__format/format_arg.h> #include <__format/format_arg_store.h> -#include <__format/format_fwd.h> +#include <__fwd/format.h> #include #include @@ -29,8 +28,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD template class _LIBCPP_TEMPLATE_VIS basic_format_args { public: - basic_format_args() noexcept = default; - template _LIBCPP_HIDE_FROM_ABI basic_format_args(const __format_arg_store<_Context, _Args...>& __store) noexcept : __size_(sizeof...(_Args)) { diff --git a/lib/libcxx/include/__format/format_context.h b/lib/libcxx/include/__format/format_context.h index edb0348b34..20c07559ea 100644 --- a/lib/libcxx/include/__format/format_context.h +++ b/lib/libcxx/include/__format/format_context.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMAT_CONTEXT_H #define _LIBCPP___FORMAT_FORMAT_CONTEXT_H -#include <__availability> #include <__concepts/same_as.h> #include <__config> #include <__format/buffer.h> @@ -18,7 +17,7 @@ #include <__format/format_arg_store.h> #include <__format/format_args.h> #include <__format/format_error.h> -#include <__format/format_fwd.h> +#include <__fwd/format.h> #include <__iterator/back_insert_iterator.h> #include <__iterator/concepts.h> #include <__memory/addressof.h> @@ -27,7 +26,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> # include #endif @@ -132,6 +131,9 @@ private: _LIBCPP_HIDE_FROM_ABI explicit basic_format_context(_OutIt __out_it, basic_format_args __args) : __out_it_(std::move(__out_it)), __args_(__args) {} # endif + + basic_format_context(const basic_format_context&) = delete; + basic_format_context& operator=(const basic_format_context&) = delete; }; // A specialization for __retarget_buffer @@ -166,20 +168,25 @@ public: # endif __ctx_(std::addressof(__ctx)), __arg_([](void* __c, size_t __id) { - return std::visit_format_arg( - [&](auto __arg) -> basic_format_arg { - if constexpr (same_as) - return {}; - else if constexpr (same_as::handle>) - // At the moment it's not possible for formatting to use a re-targeted handle. - // TODO FMT add this when support is needed. - std::__throw_format_error("Re-targeting handle not supported"); - else - return basic_format_arg{ - __format::__determine_arg_t(), - __basic_format_arg_value(__arg)}; - }, - static_cast<_Context*>(__c)->arg(__id)); + auto __visitor = [&](auto __arg) -> basic_format_arg { + if constexpr (same_as) + return {}; + else if constexpr (same_as::handle>) + // At the moment it's not possible for formatting to use a re-targeted handle. + // TODO FMT add this when support is needed. 
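// For reference, a user-level sketch (illustrative, not part of libc++) of the
// visitation API this retargeting code goes through: before C++26 the free
// std::visit_format_arg is used; under the guard above the member
// arg.visit(visitor) replaces it and the free function is deprecated.
#include <format>
#include <string>
#include <type_traits>

namespace sketch {
std::string classify(std::format_args args) {
  auto visitor = [](auto value) -> std::string {
    if constexpr (std::is_same_v<decltype(value), std::monostate>)
      return "empty";
    else if constexpr (std::is_arithmetic_v<decltype(value)>)
      return "arithmetic";
    else
      return "other"; // string_view, pointers, handle, ...
  };
  // Pre-C++26 spelling; in C++26 this becomes args.get(0).visit(visitor).
  return std::visit_format_arg(visitor, args.get(0));
}
} // namespace sketch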
+ std::__throw_format_error("Re-targeting handle not supported"); + else + return basic_format_arg{ + __format::__determine_arg_t(), + __basic_format_arg_value(__arg)}; + }; +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + return static_cast<_Context*>(__c)->arg(__id).visit(std::move(__visitor)); +# else + _LIBCPP_SUPPRESS_DEPRECATED_PUSH + return std::visit_format_arg(std::move(__visitor), static_cast<_Context*>(__c)->arg(__id)); + _LIBCPP_SUPPRESS_DEPRECATED_POP +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) }) { } diff --git a/lib/libcxx/include/__format/format_functions.h b/lib/libcxx/include/__format/format_functions.h index cf833ad205..d14b49aff1 100644 --- a/lib/libcxx/include/__format/format_functions.h +++ b/lib/libcxx/include/__format/format_functions.h @@ -41,7 +41,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -66,15 +66,14 @@ using wformat_args = basic_format_args; # endif template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI __format_arg_store<_Context, _Args...> make_format_args(_Args&... __args) { - return _VSTD::__format_arg_store<_Context, _Args...>(__args...); +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI __format_arg_store<_Context, _Args...> make_format_args(_Args&... __args) { + return std::__format_arg_store<_Context, _Args...>(__args...); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI __format_arg_store -make_wformat_args(_Args&... __args) { - return _VSTD::__format_arg_store(__args...); +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI __format_arg_store make_wformat_args(_Args&... __args) { + return std::__format_arg_store(__args...); } # endif @@ -452,8 +451,7 @@ format_to(_OutIt __out_it, wformat_string<_Args...> __fmt, _Args&&... __args) { // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string -vformat(string_view __fmt, format_args __args) { +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string vformat(string_view __fmt, format_args __args) { string __res; std::vformat_to(std::back_inserter(__res), __fmt, __args); return __res; @@ -463,7 +461,7 @@ vformat(string_view __fmt, format_args __args) { // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring vformat(wstring_view __fmt, wformat_args __args) { wstring __res; std::vformat_to(std::back_inserter(__res), __fmt, __args); @@ -472,14 +470,14 @@ vformat(wstring_view __fmt, wformat_args __args) { # endif template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string format(format_string<_Args...> __fmt, _Args&&... __args) { return std::vformat(__fmt.get(), std::make_format_args(__args...)); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring format(wformat_string<_Args...> __fmt, _Args&&... 
__args) { return std::vformat(__fmt.get(), std::make_wformat_args(__args...)); } @@ -520,14 +518,14 @@ _LIBCPP_HIDE_FROM_ABI size_t __vformatted_size(basic_string_view<_CharT> __fmt, } template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(format_string<_Args...> __fmt, _Args&&... __args) { return std::__vformatted_size(__fmt.get(), basic_format_args{std::make_format_args(__args...)}); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(wformat_string<_Args...> __fmt, _Args&&... __args) { return std::__vformatted_size(__fmt.get(), basic_format_args{std::make_wformat_args(__args...)}); } @@ -585,7 +583,7 @@ format_to(_OutIt __out_it, locale __loc, wformat_string<_Args...> __fmt, _Args&& // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string vformat(locale __loc, string_view __fmt, format_args __args) { string __res; std::vformat_to(std::back_inserter(__res), std::move(__loc), __fmt, __args); @@ -596,7 +594,7 @@ vformat(locale __loc, string_view __fmt, format_args __args) { // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring vformat(locale __loc, wstring_view __fmt, wformat_args __args) { wstring __res; std::vformat_to(std::back_inserter(__res), std::move(__loc), __fmt, __args); @@ -605,14 +603,14 @@ vformat(locale __loc, wstring_view __fmt, wformat_args __args) { # endif template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string format(locale __loc, format_string<_Args...> __fmt, _Args&&... __args) { return std::vformat(std::move(__loc), __fmt.get(), std::make_format_args(__args...)); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring format(locale __loc, wformat_string<_Args...> __fmt, _Args&&... __args) { return std::vformat(std::move(__loc), __fmt.get(), std::make_wformat_args(__args...)); } @@ -658,14 +656,14 @@ _LIBCPP_HIDE_FROM_ABI size_t __vformatted_size(locale __loc, basic_string_view<_ } template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(locale __loc, format_string<_Args...> __fmt, _Args&&... __args) { return std::__vformatted_size(std::move(__loc), __fmt.get(), basic_format_args{std::make_format_args(__args...)}); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(locale __loc, wformat_string<_Args...> __fmt, _Args&&... 
__args) { return std::__vformatted_size(std::move(__loc), __fmt.get(), basic_format_args{std::make_wformat_args(__args...)}); } diff --git a/lib/libcxx/include/__format/formatter.h b/lib/libcxx/include/__format/formatter.h index 079befc5bd..e2f418f936 100644 --- a/lib/libcxx/include/__format/formatter.h +++ b/lib/libcxx/include/__format/formatter.h @@ -10,9 +10,8 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_H #define _LIBCPP___FORMAT_FORMATTER_H -#include <__availability> #include <__config> -#include <__format/format_fwd.h> +#include <__fwd/format.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/lib/libcxx/include/__format/formatter_bool.h b/lib/libcxx/include/__format/formatter_bool.h index 1c479501b6..17dc69541e 100644 --- a/lib/libcxx/include/__format/formatter_bool.h +++ b/lib/libcxx/include/__format/formatter_bool.h @@ -12,7 +12,6 @@ #include <__algorithm/copy.h> #include <__assert> -#include <__availability> #include <__config> #include <__format/concepts.h> #include <__format/format_parse_context.h> @@ -22,7 +21,7 @@ #include <__utility/unreachable.h> #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__format/formatter_char.h b/lib/libcxx/include/__format/formatter_char.h index 3358d42225..d33e84368a 100644 --- a/lib/libcxx/include/__format/formatter_char.h +++ b/lib/libcxx/include/__format/formatter_char.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_CHAR_H #define _LIBCPP___FORMAT_FORMATTER_CHAR_H -#include <__availability> #include <__concepts/same_as.h> #include <__config> #include <__format/concepts.h> diff --git a/lib/libcxx/include/__format/formatter_floating_point.h b/lib/libcxx/include/__format/formatter_floating_point.h index 46a090a787..fa42ba203b 100644 --- a/lib/libcxx/include/__format/formatter_floating_point.h +++ b/lib/libcxx/include/__format/formatter_floating_point.h @@ -16,6 +16,7 @@ #include <__algorithm/min.h> #include <__algorithm/rotate.h> #include <__algorithm/transform.h> +#include <__assert> #include <__charconv/chars_format.h> #include <__charconv/to_chars_floating_point.h> #include <__charconv/to_chars_result.h> @@ -38,7 +39,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__format/formatter_integer.h b/lib/libcxx/include/__format/formatter_integer.h index d57082b388..41400f0047 100644 --- a/lib/libcxx/include/__format/formatter_integer.h +++ b/lib/libcxx/include/__format/formatter_integer.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_INTEGER_H #define _LIBCPP___FORMAT_FORMATTER_INTEGER_H -#include <__availability> #include <__concepts/arithmetic.h> #include <__config> #include <__format/concepts.h> diff --git a/lib/libcxx/include/__format/formatter_integral.h b/lib/libcxx/include/__format/formatter_integral.h index e0217a2400..eca966f888 100644 --- a/lib/libcxx/include/__format/formatter_integral.h +++ b/lib/libcxx/include/__format/formatter_integral.h @@ -32,7 +32,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__format/formatter_output.h b/lib/libcxx/include/__format/formatter_output.h index d5038eb158..1498f64c4a 100644 --- a/lib/libcxx/include/__format/formatter_output.h +++ b/lib/libcxx/include/__format/formatter_output.h @@ -100,8 +100,8 @@ 
__padding_size(size_t __size, size_t __width, __format_spec::__alignment __align /// /// This uses a "mass output function" of __format::__output_buffer when possible. template <__fmt_char_type _CharT, __fmt_char_type _OutCharT = _CharT> -_LIBCPP_HIDE_FROM_ABI auto __copy(basic_string_view<_CharT> __str, output_iterator auto __out_it) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__copy(basic_string_view<_CharT> __str, output_iterator auto __out_it) -> decltype(__out_it) { if constexpr (std::same_as>>) { __out_it.__get_container()->__copy(__str); return __out_it; @@ -116,16 +116,16 @@ _LIBCPP_HIDE_FROM_ABI auto __copy(basic_string_view<_CharT> __str, output_iterat template ::value_type, __fmt_char_type _OutCharT = _CharT> -_LIBCPP_HIDE_FROM_ABI auto __copy(_Iterator __first, _Iterator __last, output_iterator auto __out_it) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__copy(_Iterator __first, _Iterator __last, output_iterator auto __out_it) -> decltype(__out_it) { return __formatter::__copy(basic_string_view{__first, __last}, std::move(__out_it)); } template ::value_type, __fmt_char_type _OutCharT = _CharT> -_LIBCPP_HIDE_FROM_ABI auto __copy(_Iterator __first, size_t __n, output_iterator auto __out_it) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__copy(_Iterator __first, size_t __n, output_iterator auto __out_it) -> decltype(__out_it) { return __formatter::__copy(basic_string_view{std::to_address(__first), __n}, std::move(__out_it)); } @@ -136,9 +136,11 @@ template ::value_type, __fmt_char_type _OutCharT = _CharT, class _UnaryOperation> -_LIBCPP_HIDE_FROM_ABI auto __transform( - _Iterator __first, _Iterator __last, output_iterator auto __out_it, _UnaryOperation __operation) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__transform(_Iterator __first, + _Iterator __last, + output_iterator auto __out_it, + _UnaryOperation __operation) -> decltype(__out_it) { if constexpr (std::same_as>>) { __out_it.__get_container()->__transform(__first, __last, std::move(__operation)); return __out_it; diff --git a/lib/libcxx/include/__format/formatter_pointer.h b/lib/libcxx/include/__format/formatter_pointer.h index 3373996ec3..6941343efd 100644 --- a/lib/libcxx/include/__format/formatter_pointer.h +++ b/lib/libcxx/include/__format/formatter_pointer.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_POINTER_H #define _LIBCPP___FORMAT_FORMATTER_POINTER_H -#include <__availability> #include <__config> #include <__format/concepts.h> #include <__format/format_parse_context.h> diff --git a/lib/libcxx/include/__format/formatter_string.h b/lib/libcxx/include/__format/formatter_string.h index d1ccfb9b5f..347439fc8d 100644 --- a/lib/libcxx/include/__format/formatter_string.h +++ b/lib/libcxx/include/__format/formatter_string.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_STRING_H #define _LIBCPP___FORMAT_FORMATTER_STRING_H -#include <__availability> #include <__config> #include <__format/concepts.h> #include <__format/format_parse_context.h> diff --git a/lib/libcxx/include/__format/indic_conjunct_break_table.h b/lib/libcxx/include/__format/indic_conjunct_break_table.h new file mode 100644 index 0000000000..44521d2749 --- /dev/null +++ b/lib/libcxx/include/__format/indic_conjunct_break_table.h @@ -0,0 +1,350 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// WARNING, this entire header is generated by +// utils/generate_indic_conjunct_break_table.py +// DO NOT MODIFY! + +// UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE +// +// See Terms of Use +// for definitions of Unicode Inc.'s Data Files and Software. +// +// NOTICE TO USER: Carefully read the following legal agreement. +// BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +// DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +// YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +// TERMS AND CONDITIONS OF THIS AGREEMENT. +// IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +// THE DATA FILES OR SOFTWARE. +// +// COPYRIGHT AND PERMISSION NOTICE +// +// Copyright (c) 1991-2022 Unicode, Inc. All rights reserved. +// Distributed under the Terms of Use in https://www.unicode.org/copyright.html. +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of the Unicode data files and any associated documentation +// (the "Data Files") or Unicode software and any associated documentation +// (the "Software") to deal in the Data Files or Software +// without restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, and/or sell copies of +// the Data Files or Software, and to permit persons to whom the Data Files +// or Software are furnished to do so, provided that either +// (a) this copyright and permission notice appear with all copies +// of the Data Files or Software, or +// (b) this copyright and permission notice appear in associated +// Documentation. +// +// THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT OF THIRD PARTY RIGHTS. +// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +// NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +// DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +// DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +// TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THE DATA FILES OR SOFTWARE. +// +// Except as contained in this notice, the name of a copyright holder +// shall not be used in advertising or otherwise to promote the sale, +// use or other dealings in these Data Files or Software without prior +// written authorization of the copyright holder. + +#ifndef _LIBCPP___FORMAT_INDIC_CONJUNCT_BREAK_TABLE_H +#define _LIBCPP___FORMAT_INDIC_CONJUNCT_BREAK_TABLE_H + +#include <__algorithm/ranges_upper_bound.h> +#include <__config> +#include <__iterator/access.h> +#include +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if _LIBCPP_STD_VER >= 20 + +namespace __indic_conjunct_break { + +enum class __property : uint8_t { + // Values generated from the data files. + __Consonant, + __Extend, + __Linker, + + // The code unit has none of above properties. + __none +}; + +/// The entries of the indic conjunct break property table. 
+/// +/// The data is generated from +/// - https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt +/// +/// The data has 3 values +/// - bits [0, 1] The property. One of the values generated from the datafiles +/// of \ref __property +/// - bits [2, 10] The size of the range. +/// - bits [11, 31] The lower bound code point of the range. The upper bound of +/// the range is lower bound + size. +/// +/// The 9 bits for the size allow a maximum range of 512 elements. Some ranges +/// in the Unicode tables are larger. They are stored in multiple consecutive +/// ranges in the data table. An alternative would be to store the sizes in a +/// separate 16-bit value. The original MSVC STL code had such an approach, but +/// this approach uses less space for the data and is about 4% faster in the +/// following benchmark. +/// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp +// clang-format off +_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[201] = { + 0x00180139, + 0x001a807d, + 0x00241811, + 0x002c88b1, + 0x002df801, + 0x002e0805, + 0x002e2005, + 0x002e3801, + 0x00308029, + 0x00325851, + 0x00338001, + 0x0036b019, + 0x0036f815, + 0x00373805, + 0x0037500d, + 0x00388801, + 0x00398069, + 0x003f5821, + 0x003fe801, + 0x0040b00d, + 0x0040d821, + 0x00412809, + 0x00414811, + 0x0042c809, + 0x0044c01d, + 0x0046505d, + 0x00471871, + 0x0048a890, + 0x0049e001, + 0x004a6802, + 0x004a880d, + 0x004ac01c, + 0x004bc01c, + 0x004ca84c, + 0x004d5018, + 0x004d9000, + 0x004db00c, + 0x004de001, + 0x004e6802, + 0x004ee004, + 0x004ef800, + 0x004f8004, + 0x004ff001, + 0x0051e001, + 0x0054a84c, + 0x00555018, + 0x00559004, + 0x0055a810, + 0x0055e001, + 0x00566802, + 0x0057c800, + 0x0058a84c, + 0x00595018, + 0x00599004, + 0x0059a810, + 0x0059e001, + 0x005a6802, + 0x005ae004, + 0x005af800, + 0x005b8800, + 0x0060a84c, + 0x0061503c, + 0x0061e001, + 0x00626802, + 0x0062a805, + 0x0062c008, + 0x0065e001, + 0x0068a894, + 0x0069d805, + 0x006a6802, + 0x0071c009, + 0x0072400d, + 0x0075c009, + 0x0076400d, + 0x0078c005, + 0x0079a801, + 0x0079b801, + 0x0079c801, + 0x007b8805, + 0x007ba001, + 0x007bd00d, + 0x007c0001, + 0x007c1009, + 0x007c3005, + 0x007e3001, + 0x0081b801, + 0x0081c805, + 0x00846801, + 0x009ae809, + 0x00b8a001, + 0x00be9001, + 0x00bee801, + 0x00c54801, + 0x00c9c809, + 0x00d0b805, + 0x00d30001, + 0x00d3a81d, + 0x00d3f801, + 0x00d58035, + 0x00d5f83d, + 0x00d9a001, + 0x00db5821, + 0x00dd5801, + 0x00df3001, + 0x00e1b801, + 0x00e68009, + 0x00e6a031, + 0x00e71019, + 0x00e76801, + 0x00e7a001, + 0x00e7c005, + 0x00ee00fd, + 0x01006801, + 0x01068031, + 0x01070801, + 0x0107282d, + 0x01677809, + 0x016bf801, + 0x016f007d, + 0x01815015, + 0x0184c805, + 0x05337801, + 0x0533a025, + 0x0534f005, + 0x05378005, + 0x05416001, + 0x05470045, + 0x05495809, + 0x054d9801, + 0x05558001, + 0x05559009, + 0x0555b805, + 0x0555f005, + 0x05560801, + 0x0557b001, + 0x055f6801, + 0x07d8f001, + 0x07f1003d, + 0x080fe801, + 0x08170001, + 0x081bb011, + 0x08506801, + 0x08507801, + 0x0851c009, + 0x0851f801, + 0x08572805, + 0x0869200d, + 0x08755805, + 0x0877e809, + 0x087a3029, + 0x087c100d, + 0x08838001, + 0x0883f801, + 0x0885d001, + 0x08880009, + 0x08899805, + 0x088b9801, + 0x088e5001, + 0x0891b001, + 0x08974805, + 0x0899d805, + 0x089b3019, + 0x089b8011, + 0x08a23001, + 0x08a2f001, + 0x08a61801, + 0x08ae0001, + 0x08b5b801, + 0x08b95801, + 0x08c1d001, + 0x08c9f001, + 0x08ca1801, + 0x08d1a001, + 0x08d23801, + 0x08d4c801, + 0x08ea1001, + 0x08ea2005, + 0x08ecb801, + 0x08fa1001, + 0x0b578011, + 0x0b598019, + 0x0de4f001, + 
0x0e8b2801, + 0x0e8b3809, + 0x0e8b7011, + 0x0e8bd81d, + 0x0e8c2819, + 0x0e8d500d, + 0x0e921009, + 0x0f000019, + 0x0f004041, + 0x0f00d819, + 0x0f011805, + 0x0f013011, + 0x0f047801, + 0x0f098019, + 0x0f157001, + 0x0f17600d, + 0x0f27600d, + 0x0f468019, + 0x0f4a2019}; +// clang-format on + +/// Returns the indic conjunct break property of a code point. +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr __property __get_property(const char32_t __code_point) noexcept { + // The algorithm searches for the upper bound of the range and, when found, + // steps back one entry. This algorithm is used since the code point can be + // anywhere in the range. After a lower bound is found, the next step is to + // compare whether the code unit is indeed in the range. + // + // Since the entry contains a code unit, size, and property, the code point + // being sought needs to be adjusted. Just shifting the code point to the + // proper position doesn't work; suppose an entry has property 0, size 1, + // and lower bound 3. This results in the entry 0x1810. + // When searching for code point 3 it will search for 0x1800, find 0x1810, + // and move to the previous entry. Thus the lower bound value will never + // be found. + // The simple solution is to set the bits belonging to the property and + // size. Then the upper bound for code point 3 will return the entry after + // 0x1810. After moving to the previous entry, the algorithm arrives at the + // correct entry. + ptrdiff_t __i = std::ranges::upper_bound(__entries, (__code_point << 11) | 0x7ffu) - __entries; + if (__i == 0) + return __property::__none; + + --__i; + uint32_t __upper_bound = (__entries[__i] >> 11) + ((__entries[__i] >> 2) & 0b1'1111'1111); + if (__code_point <= __upper_bound) + return static_cast<__property>(__entries[__i] & 0b11); + + return __property::__none; +} + +} // namespace __indic_conjunct_break + +#endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FORMAT_INDIC_CONJUNCT_BREAK_TABLE_H diff --git a/lib/libcxx/include/__format/parser_std_format_spec.h b/lib/libcxx/include/__format/parser_std_format_spec.h index cf8af87b21..150bdde89f 100644 --- a/lib/libcxx/include/__format/parser_std_format_spec.h +++ b/lib/libcxx/include/__format/parser_std_format_spec.h @@ -129,8 +129,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr uint32_t __substitute_arg_id(basic_format_arg<_C /// /// They default to false so when a new field is added it needs to be opted in /// explicitly. -// TODO FMT Use an ABI tag for this struct.
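// A standalone decoder sketch for one packed entry of the indic conjunct
// break table above, following the layout documented in the header comment:
// bits [0, 1] the property, bits [2, 10] the size, bits [11, 31] the lower
// bound. The struct and function names are illustrative.
#include <cstdint>

namespace sketch {
struct decoded_entry {
  char32_t lower;
  char32_t upper;
  uint8_t property; // 0 __Consonant, 1 __Extend, 2 __Linker
};

constexpr decoded_entry decode(uint32_t entry) {
  char32_t lower = entry >> 11;
  uint32_t size = (entry >> 2) & 0b1'1111'1111;
  return {lower, static_cast<char32_t>(lower + size), static_cast<uint8_t>(entry & 0b11)};
}

// 0x00180139, the first entry of the table, decodes to U+0300..U+034E with
// property __Extend, i.e. part of the combining diacritical marks block.
static_assert(decode(0x00180139).lower == U'\x0300');
static_assert(decode(0x00180139).upper == U'\x034E');
static_assert(decode(0x00180139).property == 1);
} // namespace sketch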
-struct __fields { +struct _LIBCPP_HIDE_FROM_ABI __fields { uint16_t __sign_ : 1 {false}; uint16_t __alternate_form_ : 1 {false}; uint16_t __zero_padding_ : 1 {false}; @@ -355,10 +354,10 @@ public: _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator __parse(_ParseContext& __ctx, __fields __fields) { auto __begin = __ctx.begin(); auto __end = __ctx.end(); - if (__begin == __end) + if (__begin == __end || *__begin == _CharT('}') || (__fields.__use_range_fill_ && *__begin == _CharT(':'))) return __begin; - if (__parse_fill_align(__begin, __end, __fields.__use_range_fill_) && __begin == __end) + if (__parse_fill_align(__begin, __end) && __begin == __end) return __begin; if (__fields.__sign_) { @@ -574,12 +573,10 @@ private: return false; } - _LIBCPP_HIDE_FROM_ABI constexpr void __validate_fill_character(_CharT __fill, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr void __validate_fill_character(_CharT __fill) { // The forbidden fill characters all code points formed from a single code unit, thus the // check can be omitted when more code units are used. - if (__use_range_fill && (__fill == _CharT('{') || __fill == _CharT('}') || __fill == _CharT(':'))) - std::__throw_format_error("The fill option contains an invalid value"); - else if (__fill == _CharT('{') || __fill == _CharT('}')) + if (__fill == _CharT('{')) std::__throw_format_error("The fill option contains an invalid value"); } @@ -590,7 +587,7 @@ private: # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS || (same_as<_CharT, wchar_t> && sizeof(wchar_t) == 2) # endif - _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end) { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __begin != __end, "when called with an empty input the function will cause " @@ -606,7 +603,7 @@ private: // The forbidden fill characters all are code points encoded // in one code unit, thus the check can be omitted when more // code units are used. 
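// A user-level illustration, assuming a <format> implementation with the
// relaxed rule above: only '{' is rejected outright as a fill character;
// ':' stays usable as a fill for non-range arguments, and '}' simply
// terminates the replacement field.
#include <format>
#include <string>

namespace sketch {
inline std::string fill_examples() {
  std::string centered = std::format("{:*^7}", "abc"); // "**abc**": '*' fill, '^' align
  std::string padded   = std::format("{::>4}", 42);    // "::42":    ':' fill, '>' align
  return centered + " " + padded;
}
} // namespace sketch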
- __validate_fill_character(*__begin, __use_range_fill); + __validate_fill_character(*__begin); std::copy_n(__begin, __code_units, std::addressof(__fill_.__data[0])); __begin += __code_units + 1; @@ -623,7 +620,7 @@ private: # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template requires(same_as<_CharT, wchar_t> && sizeof(wchar_t) == 4) - _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end) { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __begin != __end, "when called with an empty input the function will cause " @@ -632,7 +629,7 @@ private: if (!__unicode::__is_scalar_value(*__begin)) std::__throw_format_error("The fill option contains an invalid value"); - __validate_fill_character(*__begin, __use_range_fill); + __validate_fill_character(*__begin); __fill_.__data[0] = *__begin; __begin += 2; @@ -651,14 +648,14 @@ private: # else // _LIBCPP_HAS_NO_UNICODE // range-fill and tuple-fill are identical template - _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end) { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __begin != __end, "when called with an empty input the function will cause " "undefined behavior by evaluating data not in the input"); if (__begin + 1 != __end) { if (__parse_alignment(*(__begin + 1))) { - __validate_fill_character(*__begin, __use_range_fill); + __validate_fill_character(*__begin); __fill_.__data[0] = *__begin; __begin += 2; @@ -1158,8 +1155,8 @@ __estimate_column_width(basic_string_view<_CharT> __str, size_t __maximum, __col // When Unicode isn't supported assume ASCII and every code unit is one code // point. In ASCII the estimated column width is always one. Thus there's no // need for rounding. - size_t __width_ = std::min(__str.size(), __maximum); - return {__width_, __str.begin() + __width_}; + size_t __width = std::min(__str.size(), __maximum); + return {__width, __str.begin() + __width}; } # endif // !defined(_LIBCPP_HAS_NO_UNICODE) diff --git a/lib/libcxx/include/__format/unicode.h b/lib/libcxx/include/__format/unicode.h index 40067ca344..de7d0fea1d 100644 --- a/lib/libcxx/include/__format/unicode.h +++ b/lib/libcxx/include/__format/unicode.h @@ -15,8 +15,10 @@ #include <__concepts/same_as.h> #include <__config> #include <__format/extended_grapheme_cluster_table.h> +#include <__format/indic_conjunct_break_table.h> #include <__iterator/concepts.h> #include <__iterator/readable_traits.h> // iter_value_t +#include <__utility/unreachable.h> #include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -292,84 +294,231 @@ private: }; # endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS -_LIBCPP_HIDE_FROM_ABI constexpr bool __at_extended_grapheme_cluster_break( - bool& __ri_break_allowed, - bool __has_extened_pictographic, - __extended_grapheme_custer_property_boundary::__property __prev, - __extended_grapheme_custer_property_boundary::__property __next) { - using __extended_grapheme_custer_property_boundary::__property; +// State machine to implement the Extended Grapheme Cluster Boundary +// +// The exact rules may change between Unicode versions. 
+// This implements the extended rules; see +// https://www.unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries +class __extended_grapheme_cluster_break { + using __EGC_property = __extended_grapheme_custer_property_boundary::__property; + using __inCB_property = __indic_conjunct_break::__property; - __has_extened_pictographic |= __prev == __property::__Extended_Pictographic; - - // https://www.unicode.org/reports/tr29/tr29-39.html#Grapheme_Cluster_Boundary_Rules - - // *** Break at the start and end of text, unless the text is empty. *** - - _LIBCPP_ASSERT_INTERNAL(__prev != __property::__sot, "should be handled in the constructor"); // GB1 - _LIBCPP_ASSERT_INTERNAL(__prev != __property::__eot, "should be handled by our caller"); // GB2 - - // *** Do not break between a CR and LF. Otherwise, break before and after controls. *** - if (__prev == __property::__CR && __next == __property::__LF) // GB3 - return false; - - if (__prev == __property::__Control || __prev == __property::__CR || __prev == __property::__LF) // GB4 - return true; - - if (__next == __property::__Control || __next == __property::__CR || __next == __property::__LF) // GB5 - return true; - - // *** Do not break Hangul syllable sequences. *** - if (__prev == __property::__L && (__next == __property::__L || __next == __property::__V || - __next == __property::__LV || __next == __property::__LVT)) // GB6 - return false; - - if ((__prev == __property::__LV || __prev == __property::__V) && - (__next == __property::__V || __next == __property::__T)) // GB7 - return false; - - if ((__prev == __property::__LVT || __prev == __property::__T) && __next == __property::__T) // GB8 - return false; - - // *** Do not break before extending characters or ZWJ. *** - if (__next == __property::__Extend || __next == __property::__ZWJ) - return false; // GB9 - - // *** Do not break before SpacingMarks, or after Prepend characters. *** - if (__next == __property::__SpacingMark) // GB9a - return false; - - if (__prev == __property::__Prepend) // GB9b - return false; - - // *** Do not break within emoji modifier sequences or emoji zwj sequences. *** - - // GB11 \p{Extended_Pictographic} Extend* ZWJ x \p{Extended_Pictographic} - // - // Note that several parts of this rule are matched by GB9: Any x (Extend | ZWJ) - // - \p{Extended_Pictographic} x Extend - // - Extend x Extend - // - \p{Extended_Pictographic} x ZWJ - // - Extend x ZWJ - // - // So the only case left to test is - // - \p{Extended_Pictographic}' x ZWJ x \p{Extended_Pictographic} - // where \p{Extended_Pictographic}' is stored in __has_extened_pictographic - if (__has_extened_pictographic && __prev == __property::__ZWJ && __next == __property::__Extended_Pictographic) - return false; - - // *** Do not break within emoji flag sequences *** - - // That is, do not break between regional indicator (RI) symbols if there - // is an odd number of RI characters before the break point. - - if (__prev == __property::__Regional_Indicator && __next == __property::__Regional_Indicator) { // GB12 + GB13 - __ri_break_allowed = !__ri_break_allowed; - return __ri_break_allowed; +public: + _LIBCPP_HIDE_FROM_ABI constexpr explicit __extended_grapheme_cluster_break(char32_t __first_code_point) + : __prev_code_point_(__first_code_point), + __prev_property_(__extended_grapheme_custer_property_boundary::__get_property(__first_code_point)) { + // Initializes the active rule.
+ if (__prev_property_ == __EGC_property::__Extended_Pictographic) + __active_rule_ = __rule::__GB11_emoji; + else if (__prev_property_ == __EGC_property::__Regional_Indicator) + __active_rule_ = __rule::__GB12_GB13_regional_indicator; + else if (__indic_conjunct_break::__get_property(__first_code_point) == __inCB_property::__Consonant) + __active_rule_ = __rule::__GB9c_indic_conjunct_break; } - // *** Otherwise, break everywhere. *** - return true; // GB999 -} + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(char32_t __next_code_point) { + __EGC_property __next_property = __extended_grapheme_custer_property_boundary::__get_property(__next_code_point); + bool __result = __evaluate(__next_code_point, __next_property); + __prev_code_point_ = __next_code_point; + __prev_property_ = __next_property; + return __result; + } + + // The code point whose break property is considered during the next + // evaluation cycle. + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr char32_t __current_code_point() const { return __prev_code_point_; } + +private: + // The naming of the identifiers matches the Unicode standard. + // NOLINTBEGIN(readability-identifier-naming) + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate(char32_t __next_code_point, __EGC_property __next_property) { + switch (__active_rule_) { + case __rule::__none: + return __evaluate_none(__next_code_point, __next_property); + case __rule::__GB9c_indic_conjunct_break: + return __evaluate_GB9c_indic_conjunct_break(__next_code_point, __next_property); + case __rule::__GB11_emoji: + return __evaluate_GB11_emoji(__next_code_point, __next_property); + case __rule::__GB12_GB13_regional_indicator: + return __evaluate_GB12_GB13_regional_indicator(__next_code_point, __next_property); + } + __libcpp_unreachable(); + } + + _LIBCPP_HIDE_FROM_ABI constexpr bool __evaluate_none(char32_t __next_code_point, __EGC_property __next_property) { + // *** Break at the start and end of text, unless the text is empty. *** + + _LIBCPP_ASSERT_INTERNAL(__prev_property_ != __EGC_property::__sot, "should be handled in the constructor"); // GB1 + _LIBCPP_ASSERT_INTERNAL(__prev_property_ != __EGC_property::__eot, "should be handled by our caller"); // GB2 + + // *** Do not break between a CR and LF. Otherwise, break before and after controls. *** + if (__prev_property_ == __EGC_property::__CR && __next_property == __EGC_property::__LF) // GB3 + return false; + + if (__prev_property_ == __EGC_property::__Control || __prev_property_ == __EGC_property::__CR || + __prev_property_ == __EGC_property::__LF) // GB4 + return true; + + if (__next_property == __EGC_property::__Control || __next_property == __EGC_property::__CR || + __next_property == __EGC_property::__LF) // GB5 + return true; + + // *** Do not break Hangul syllable sequences. *** + if (__prev_property_ == __EGC_property::__L && + (__next_property == __EGC_property::__L || __next_property == __EGC_property::__V || + __next_property == __EGC_property::__LV || __next_property == __EGC_property::__LVT)) // GB6 + return false; + + if ((__prev_property_ == __EGC_property::__LV || __prev_property_ == __EGC_property::__V) && + (__next_property == __EGC_property::__V || __next_property == __EGC_property::__T)) // GB7 + return false; + + if ((__prev_property_ == __EGC_property::__LVT || __prev_property_ == __EGC_property::__T) && + __next_property == __EGC_property::__T) // GB8 + return false; + + // *** Do not break before extending characters or ZWJ.
*** + if (__next_property == __EGC_property::__Extend || __next_property == __EGC_property::__ZWJ) + return false; // GB9 + + // *** Do not break before SpacingMarks, or after Prepend characters. *** + if (__next_property == __EGC_property::__SpacingMark) // GB9a + return false; + + if (__prev_property_ == __EGC_property::__Prepend) // GB9b + return false; + + // *** Do not break within certain combinations with Indic_Conjunct_Break (InCB)=Linker. *** + if (__indic_conjunct_break::__get_property(__next_code_point) == __inCB_property::__Consonant) { + __active_rule_ = __rule::__GB9c_indic_conjunct_break; + __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Consonant; + return true; + } + + // *** Do not break within emoji modifier sequences or emoji zwj sequences. *** + if (__next_property == __EGC_property::__Extended_Pictographic) { + __active_rule_ = __rule::__GB11_emoji; + __GB11_emoji_state_ = __GB11_emoji_state::__Extended_Pictographic; + return true; + } + + // *** Do not break within emoji flag sequences *** + + // That is, do not break between regional indicator (RI) symbols if there + // is an odd number of RI characters before the break point. + if (__next_property == __EGC_property::__Regional_Indicator) { // GB12 + GB13 + __active_rule_ = __rule::__GB12_GB13_regional_indicator; + return true; + } + + // *** Otherwise, break everywhere. *** + return true; // GB999 + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate_GB9c_indic_conjunct_break(char32_t __next_code_point, __EGC_property __next_property) { + __inCB_property __break = __indic_conjunct_break::__get_property(__next_code_point); + if (__break == __inCB_property::__none) { + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + } + + switch (__GB9c_indic_conjunct_break_state_) { + case __GB9c_indic_conjunct_break_state::__Consonant: + if (__break == __inCB_property::__Extend) { + return false; + } + if (__break == __inCB_property::__Linker) { + __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Linker; + return false; + } + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + + case __GB9c_indic_conjunct_break_state::__Linker: + if (__break == __inCB_property::__Extend) { + return false; + } + if (__break == __inCB_property::__Linker) { + return false; + } + if (__break == __inCB_property::__Consonant) { + __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Consonant; + return false; + } + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + } + __libcpp_unreachable(); + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate_GB11_emoji(char32_t __next_code_point, __EGC_property __next_property) { + switch (__GB11_emoji_state_) { + case __GB11_emoji_state::__Extended_Pictographic: + if (__next_property == __EGC_property::__Extend) { + __GB11_emoji_state_ = __GB11_emoji_state::__Extend; + return false; + } + [[fallthrough]]; + case __GB11_emoji_state::__Extend: + if (__next_property == __EGC_property::__ZWJ) { + __GB11_emoji_state_ = __GB11_emoji_state::__ZWJ; + return false; + } + if (__next_property == __EGC_property::__Extend) + return false; + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + + case __GB11_emoji_state::__ZWJ: + if (__next_property == __EGC_property::__Extended_Pictographic) { + __GB11_emoji_state_ = 
__GB11_emoji_state::__Extended_Pictographic; + return false; + } + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + } + __libcpp_unreachable(); + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate_GB12_GB13_regional_indicator(char32_t __next_code_point, __EGC_property __next_property) { + __active_rule_ = __rule::__none; + if (__next_property == __EGC_property::__Regional_Indicator) + return false; + return __evaluate_none(__next_code_point, __next_property); + } + + char32_t __prev_code_point_; + __EGC_property __prev_property_; + + enum class __rule { + __none, + __GB9c_indic_conjunct_break, + __GB11_emoji, + __GB12_GB13_regional_indicator, + }; + __rule __active_rule_ = __rule::__none; + + enum class __GB11_emoji_state { + __Extended_Pictographic, + __Extend, + __ZWJ, + }; + __GB11_emoji_state __GB11_emoji_state_ = __GB11_emoji_state::__Extended_Pictographic; + + enum class __GB9c_indic_conjunct_break_state { + __Consonant, + __Linker, + }; + + __GB9c_indic_conjunct_break_state __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Consonant; + + // NOLINTEND(readability-identifier-naming) +}; /// Helper class to extract an extended grapheme cluster from a Unicode character range. /// @@ -382,9 +531,7 @@ class __extended_grapheme_cluster_view { public: _LIBCPP_HIDE_FROM_ABI constexpr explicit __extended_grapheme_cluster_view(_Iterator __first, _Iterator __last) - : __code_point_view_(__first, __last), - __next_code_point_(__code_point_view_.__consume().__code_point), - __next_prop_(__extended_grapheme_custer_property_boundary::__get_property(__next_code_point_)) {} + : __code_point_view_(__first, __last), __at_break_(__code_point_view_.__consume().__code_point) {} struct __cluster { /// The first code point of the extended grapheme cluster. 
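The state machine above replaces the old loop-local flags (__ri_break_allowed, __has_extened_pictographic) with one explicit __active_rule_. As a standalone illustration of the simplest rule it encodes, here is GB12/GB13 (regional-indicator pairing) in isolation; this is a sketch with invented names, not the libc++ code:

```cpp
#include <cstddef>
#include <vector>

// Toy property set: regional indicator (RI) or anything else.
enum class cp_class { regional_indicator, other };

// GB12/GB13 in isolation: never break between the first and second RI of a
// pair, always break after a completed pair. The class above gets the same
// effect by entering __GB12_GB13_regional_indicator on the first RI and
// dropping back to __rule::__none after consuming its partner.
std::size_t count_clusters(const std::vector<cp_class>& text) {
  std::size_t clusters = 0;
  for (std::size_t i = 0; i < text.size();) {
    ++clusters;
    bool paired = text[i] == cp_class::regional_indicator && i + 1 < text.size() &&
                  text[i + 1] == cp_class::regional_indicator;
    i += paired ? 2 : 1; // a pair of RIs is one cluster (one flag)
  }
  return clusters;
}
```

Four consecutive RIs therefore form two clusters (two flags), which is exactly the "odd number of RI characters" wording in the deleted comment.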
@@ -400,44 +547,20 @@ public: _Iterator __last_; }; - _LIBCPP_HIDE_FROM_ABI constexpr __cluster __consume() { - _LIBCPP_ASSERT_INTERNAL(__next_prop_ != __extended_grapheme_custer_property_boundary::__property::__eot, - "can't move beyond the end of input"); - - char32_t __code_point = __next_code_point_; - if (!__code_point_view_.__at_end()) - return {__code_point, __get_break()}; - - __next_prop_ = __extended_grapheme_custer_property_boundary::__property::__eot; - return {__code_point, __code_point_view_.__position()}; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr __cluster __consume() { + char32_t __code_point = __at_break_.__current_code_point(); + _Iterator __position = __code_point_view_.__position(); + while (!__code_point_view_.__at_end()) { + if (__at_break_(__code_point_view_.__consume().__code_point)) + break; + __position = __code_point_view_.__position(); + } + return {__code_point, __position}; } private: __code_point_view<_CharT> __code_point_view_; - - char32_t __next_code_point_; - __extended_grapheme_custer_property_boundary::__property __next_prop_; - - _LIBCPP_HIDE_FROM_ABI constexpr _Iterator __get_break() { - bool __ri_break_allowed = true; - bool __has_extened_pictographic = false; - while (true) { - _Iterator __result = __code_point_view_.__position(); - __extended_grapheme_custer_property_boundary::__property __prev = __next_prop_; - if (__code_point_view_.__at_end()) { - __next_prop_ = __extended_grapheme_custer_property_boundary::__property::__eot; - return __result; - } - __next_code_point_ = __code_point_view_.__consume().__code_point; - __next_prop_ = __extended_grapheme_custer_property_boundary::__get_property(__next_code_point_); - - __has_extened_pictographic |= - __prev == __extended_grapheme_custer_property_boundary::__property::__Extended_Pictographic; - - if (__at_extended_grapheme_cluster_break(__ri_break_allowed, __has_extened_pictographic, __prev, __next_prop_)) - return __result; - } - } + __extended_grapheme_cluster_break __at_break_; }; template diff --git a/lib/libcxx/include/__format/width_estimation_table.h b/lib/libcxx/include/__format/width_estimation_table.h index cfb488975d..11f61dea18 100644 --- a/lib/libcxx/include/__format/width_estimation_table.h +++ b/lib/libcxx/include/__format/width_estimation_table.h @@ -119,7 +119,7 @@ namespace __width_estimation_table { /// - bits [0, 13] The size of the range, allowing 16384 elements. /// - bits [14, 31] The lower bound code point of the range. The upper bound of /// the range is lower bound + size. 
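The packed layout described above can be sanity-checked by hand. A minimal decoder, using the merged 0x0bfc004e row from the hunk below as its sample value (a standalone sketch, not part of the patch):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // One packed entry, decoded per the documented layout: bits [14, 31] hold
  // the first code point, bits [0, 13] the offset of the last code point
  // (so the bracketed element count in the table is offset + 1).
  const std::uint32_t entry = 0x0bfc004e;
  std::uint32_t first = entry >> 14;    // 0x2ff0
  std::uint32_t size  = entry & 0x3fff; // 0x4e
  std::printf("%08x - %08x [%5u]\n", first, first + size, size + 1);
  // prints: 00002ff0 - 0000303e [   79], matching the row's comment
}
```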
-inline constexpr uint32_t __entries[108] = { +_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[107] = { 0x0440005f /* 00001100 - 0000115f [ 96] */, // 0x08c68001 /* 0000231a - 0000231b [ 2] */, // 0x08ca4001 /* 00002329 - 0000232a [ 2] */, // @@ -158,14 +158,13 @@ inline constexpr uint32_t __entries[108] = { 0x0ba00019 /* 00002e80 - 00002e99 [ 26] */, // 0x0ba6c058 /* 00002e9b - 00002ef3 [ 89] */, // 0x0bc000d5 /* 00002f00 - 00002fd5 [ 214] */, // - 0x0bfc000b /* 00002ff0 - 00002ffb [ 12] */, // - 0x0c00003e /* 00003000 - 0000303e [ 63] */, // + 0x0bfc004e /* 00002ff0 - 0000303e [ 79] */, // 0x0c104055 /* 00003041 - 00003096 [ 86] */, // 0x0c264066 /* 00003099 - 000030ff [ 103] */, // 0x0c41402a /* 00003105 - 0000312f [ 43] */, // 0x0c4c405d /* 00003131 - 0000318e [ 94] */, // 0x0c640053 /* 00003190 - 000031e3 [ 84] */, // - 0x0c7c002e /* 000031f0 - 0000321e [ 47] */, // + 0x0c7bc02f /* 000031ef - 0000321e [ 48] */, // 0x0c880027 /* 00003220 - 00003247 [ 40] */, // 0x0c943fff /* 00003250 - 0000724f [16384] */, // 0x1c94323c /* 00007250 - 0000a48c [12861] */, // @@ -238,7 +237,7 @@ inline constexpr uint32_t __table_upper_bound = 0x0003fffd; /// Returns the estimated width of a Unicode code point. /// -/// \pre The code point is a valid Unicode code point. +/// \\pre The code point is a valid Unicode code point. [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int __estimated_width(const char32_t __code_point) noexcept { // Since __table_upper_bound contains the unshifted range do the // comparison without shifting. diff --git a/lib/libcxx/include/__format/write_escaped.h b/lib/libcxx/include/__format/write_escaped.h index 43a074dd8d..052ea98c3c 100644 --- a/lib/libcxx/include/__format/write_escaped.h +++ b/lib/libcxx/include/__format/write_escaped.h @@ -101,15 +101,27 @@ _LIBCPP_HIDE_FROM_ABI void __write_escape_ill_formed_code_unit(basic_string<_Cha } template -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool __is_escaped_sequence_written(basic_string<_CharT>& __str, char32_t __value) { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +__is_escaped_sequence_written(basic_string<_CharT>& __str, bool __last_escaped, char32_t __value) { # ifdef _LIBCPP_HAS_NO_UNICODE // For ASCII assume everything above 127 is printable. 
if (__value > 127) return false; # endif + // [format.string.escaped]/2.2.1.2.1 + // CE is UTF-8, UTF-16, or UTF-32 and C corresponds to a Unicode scalar + // value whose Unicode property General_Category has a value in the groups + // Separator (Z) or Other (C), as described by UAX #44 of the Unicode Standard, if (!__escaped_output_table::__needs_escape(__value)) - return false; + // [format.string.escaped]/2.2.1.2.2 + // CE is UTF-8, UTF-16, or UTF-32 and C corresponds to a Unicode scalar + // value with the Unicode property Grapheme_Extend=Yes as described by UAX + // #44 of the Unicode Standard and C is not immediately preceded in S by a + // character P appended to E without translation to an escape sequence, + if (!__last_escaped || __extended_grapheme_custer_property_boundary::__get_property(__value) != + __extended_grapheme_custer_property_boundary::__property::__Extend) + return false; __formatter::__write_well_formed_escaped_code_unit(__str, __value); return true; @@ -124,8 +136,8 @@ enum class __escape_quotation_mark { __apostrophe, __double_quote }; // [format.string.escaped]/2 template -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool -__is_escaped_sequence_written(basic_string<_CharT>& __str, char32_t __value, __escape_quotation_mark __mark) { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool __is_escaped_sequence_written( + basic_string<_CharT>& __str, char32_t __value, bool __last_escaped, __escape_quotation_mark __mark) { // 2.2.1.1 - Mapped character in [tab:format.escape.sequences] switch (__value) { case _CharT('\t'): @@ -167,7 +179,7 @@ __is_escaped_sequence_written(basic_string<_CharT>& __str, char32_t __value, __e // TODO FMT determine what to do with shift sequences. // 2.2.1.2.1 and 2.2.1.2.2 - Escape - return __formatter::__is_escaped_sequence_written(__str, __formatter::__to_char32(__value)); + return __formatter::__is_escaped_sequence_written(__str, __last_escaped, __formatter::__to_char32(__value)); } template @@ -175,11 +187,15 @@ _LIBCPP_HIDE_FROM_ABI void __escape(basic_string<_CharT>& __str, basic_string_view<_CharT> __values, __escape_quotation_mark __mark) { __unicode::__code_point_view<_CharT> __view{__values.begin(), __values.end()}; + // When the first code unit has the property Grapheme_Extend=Yes it needs to + // be escaped. This happens when the previous code unit was also escaped. 
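The __last_escaped threading added here implements the [format.string.escaped] wording quoted above: a Grapheme_Extend code point is escaped only when the code point before it was itself escaped, or when it begins the string. A sketch of the observable effect, assuming a C++23 library with these P2713R1 semantics and a UTF-8 literal encoding:

```cpp
#include <format>
#include <iostream>

int main() {
  // U+0301 COMBINING ACUTE ACCENT after a printable 'e': the 'e' is copied
  // without translation, so the combining mark is not escaped.
  std::cout << std::format("{:?}", "e\u0301") << '\n'; // "é"

  // The same mark at the start of the string has no preceding unescaped
  // character (the flag starts out true below), so it is escaped.
  std::cout << std::format("{:?}", "\u0301") << '\n'; // "\u{301}"
}
```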
+ bool __escape = true; while (!__view.__at_end()) { auto __first = __view.__position(); typename __unicode::__consume_result __result = __view.__consume(); if (__result.__status == __unicode::__consume_result::__ok) { - if (!__formatter::__is_escaped_sequence_written(__str, __result.__code_point, __mark)) + __escape = __formatter::__is_escaped_sequence_written(__str, __result.__code_point, __escape, __mark); + if (!__escape) // 2.2.1.3 - Add the character ranges::copy(__first, __view.__position(), std::back_insert_iterator(__str)); } else { diff --git a/lib/libcxx/include/__functional/bind.h b/lib/libcxx/include/__functional/bind.h index 19e7d82155..b4f46441da 100644 --- a/lib/libcxx/include/__functional/bind.h +++ b/lib/libcxx/include/__functional/bind.h @@ -13,6 +13,7 @@ #include <__config> #include <__functional/invoke.h> #include <__functional/weak_result_type.h> +#include <__fwd/functional.h> #include <__type_traits/decay.h> #include <__type_traits/is_reference_wrapper.h> #include <__type_traits/is_void.h> @@ -94,7 +95,7 @@ __mu(_Ti& __ti, tuple<_Uj...>& __uj) { return std::__mu_expand(__ti, __uj, __indices()); } -template +template struct __mu_return2 {}; template @@ -104,8 +105,8 @@ struct __mu_return2 { template ::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename __mu_return2<0 < is_placeholder<_Ti>::value, _Ti, _Uj>::type - __mu(_Ti&, _Uj& __uj) { +typename __mu_return2<0 < is_placeholder<_Ti>::value, _Ti, _Uj>::type +__mu(_Ti&, _Uj& __uj) { const size_t __indx = is_placeholder<_Ti>::value - 1; return std::forward::type>(std::get<__indx>(__uj)); } @@ -119,7 +120,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Ti& __mu(_Ti& __ti, return __ti; } -template +template struct __mu_return_impl; template @@ -224,8 +225,8 @@ public: template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename __bind_return >::type - operator()(_Args&&... __args) const { + typename __bind_return >::type + operator()(_Args&&... __args) const { return std::__apply_functor(__f_, __bound_args_, __indices(), tuple<_Args&&...>(std::forward<_Args>(__args)...)); } }; diff --git a/lib/libcxx/include/__functional/bind_back.h b/lib/libcxx/include/__functional/bind_back.h index ce26d3b706..e44768d228 100644 --- a/lib/libcxx/include/__functional/bind_back.h +++ b/lib/libcxx/include/__functional/bind_back.h @@ -52,7 +52,7 @@ struct __bind_back_t : __perfect_forward<__bind_back_op template requires is_constructible_v, _Fn> && is_move_constructible_v> && - (is_constructible_v, _Args> && ...) && (is_move_constructible_v> && ...) + (is_constructible_v, _Args> && ...) && (is_move_constructible_v> && ...) _LIBCPP_HIDE_FROM_ABI constexpr auto __bind_back(_Fn&& __f, _Args&&... __args) noexcept( noexcept(__bind_back_t, tuple...>>( std::forward<_Fn>(__f), std::forward_as_tuple(std::forward<_Args>(__args)...)))) @@ -62,6 +62,20 @@ _LIBCPP_HIDE_FROM_ABI constexpr auto __bind_back(_Fn&& __f, _Args&&... __args) n std::forward<_Fn>(__f), std::forward_as_tuple(std::forward<_Args>(__args)...)); } +# if _LIBCPP_STD_VER >= 23 +template +_LIBCPP_HIDE_FROM_ABI constexpr auto bind_back(_Fn&& __f, _Args&&... 
__args) { + static_assert(is_constructible_v, _Fn>, "bind_back requires decay_t to be constructible from F"); + static_assert(is_move_constructible_v>, "bind_back requires decay_t to be move constructible"); + static_assert((is_constructible_v, _Args> && ...), + "bind_back requires all decay_t to be constructible from respective Args"); + static_assert((is_move_constructible_v> && ...), + "bind_back requires all decay_t to be move constructible"); + return __bind_back_t, tuple...>>( + std::forward<_Fn>(__f), std::forward_as_tuple(std::forward<_Args>(__args)...)); +} +# endif // _LIBCPP_STD_VER >= 23 + #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__functional/bind_front.h b/lib/libcxx/include/__functional/bind_front.h index 30dda53361..87ef3affe8 100644 --- a/lib/libcxx/include/__functional/bind_front.h +++ b/lib/libcxx/include/__functional/bind_front.h @@ -17,7 +17,6 @@ #include <__type_traits/decay.h> #include <__type_traits/enable_if.h> #include <__type_traits/is_constructible.h> -#include <__type_traits/is_move_constructible.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -30,9 +29,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD struct __bind_front_op { template - _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Args&&... __args) const - noexcept(noexcept(std::invoke(std::forward<_Args>(__args)...))) - -> decltype(std::invoke(std::forward<_Args>(__args)...)) { + _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Args&&... __args) const noexcept( + noexcept(std::invoke(std::forward<_Args>(__args)...))) -> decltype(std::invoke(std::forward<_Args>(__args)...)) { return std::invoke(std::forward<_Args>(__args)...); } }; diff --git a/lib/libcxx/include/__functional/function.h b/lib/libcxx/include/__functional/function.h index 416c26a0c7..c7b98035e3 100644 --- a/lib/libcxx/include/__functional/function.h +++ b/lib/libcxx/include/__functional/function.h @@ -28,7 +28,7 @@ #include <__type_traits/decay.h> #include <__type_traits/is_core_convertible.h> #include <__type_traits/is_scalar.h> -#include <__type_traits/is_trivially_copy_constructible.h> +#include <__type_traits/is_trivially_constructible.h> #include <__type_traits/is_trivially_destructible.h> #include <__type_traits/is_void.h> #include <__type_traits/strip_signature.h> @@ -55,7 +55,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD // bad_function_call _LIBCPP_DIAGNOSTIC_PUSH +# if !_LIBCPP_AVAILABILITY_HAS_BAD_FUNCTION_CALL_KEY_FUNCTION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wweak-vtables") +# endif class _LIBCPP_EXPORTED_FROM_ABI bad_function_call : public exception { public: _LIBCPP_HIDE_FROM_ABI bad_function_call() _NOEXCEPT = default; @@ -64,7 +66,7 @@ public: // Note that when a key function is not used, every translation unit that uses // bad_function_call will end up containing a weak definition of the vtable and // typeinfo. 
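The weak-vtable situation this comment describes is the classic key-function pattern. A minimal sketch with invented names, not libc++'s:

```cpp
// error.h -- declaring the first non-inline virtual member and defining it
// out of line in exactly one .cpp file pins the vtable and typeinfo to that
// translation unit; keeping every virtual member inline makes each TU that
// uses the class emit weak copies instead.
struct my_error {
  virtual ~my_error(); // declared only: this is the key function
  virtual const char* what() const noexcept { return "my_error"; }
};

// error.cpp -- the single TU that owns the vtable and typeinfo
my_error::~my_error() = default;
```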
-# ifdef _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION +# if _LIBCPP_AVAILABILITY_HAS_BAD_FUNCTION_CALL_KEY_FUNCTION ~bad_function_call() _NOEXCEPT override; # else _LIBCPP_HIDE_FROM_ABI_VIRTUAL ~bad_function_call() _NOEXCEPT override {} @@ -230,10 +232,10 @@ class _LIBCPP_TEMPLATE_VIS __base; template class __base<_Rp(_ArgTypes...)> { - __base(const __base&); - __base& operator=(const __base&); - public: + __base(const __base&) = delete; + __base& operator=(const __base&) = delete; + _LIBCPP_HIDE_FROM_ABI __base() {} _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual ~__base() {} virtual __base* __clone() const = 0; @@ -514,7 +516,7 @@ struct __policy { } _LIBCPP_HIDE_FROM_ABI static const __policy* __create_empty() { - static const _LIBCPP_CONSTEXPR __policy __policy = { + static constexpr __policy __policy = { nullptr, nullptr, true, @@ -541,7 +543,7 @@ private: template _LIBCPP_HIDE_FROM_ABI static const __policy* __choose_policy(/* is_small = */ false_type) { - static const _LIBCPP_CONSTEXPR __policy __policy = { + static constexpr __policy __policy = { &__large_clone<_Fun>, &__large_destroy<_Fun>, false, @@ -556,7 +558,7 @@ private: template _LIBCPP_HIDE_FROM_ABI static const __policy* __choose_policy(/* is_small = */ true_type) { - static const _LIBCPP_CONSTEXPR __policy __policy = { + static constexpr __policy __policy = { nullptr, nullptr, false, @@ -768,7 +770,7 @@ public: { } - virtual __base<_Rp(_ArgTypes...)>* __clone() const { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual __base<_Rp(_ArgTypes...)>* __clone() const { _LIBCPP_ASSERT_INTERNAL( false, "Block pointers are just pointers, so they should always fit into " @@ -777,9 +779,11 @@ public: return nullptr; } - virtual void __clone(__base<_Rp(_ArgTypes...)>* __p) const { ::new ((void*)__p) __func(__f_); } + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void __clone(__base<_Rp(_ArgTypes...)>* __p) const { + ::new ((void*)__p) __func(__f_); + } - virtual void destroy() _NOEXCEPT { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void destroy() _NOEXCEPT { # ifndef _LIBCPP_HAS_OBJC_ARC if (__f_) _Block_release(__f_); @@ -787,7 +791,7 @@ public: __f_ = 0; } - virtual void destroy_deallocate() _NOEXCEPT { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void destroy_deallocate() _NOEXCEPT { _LIBCPP_ASSERT_INTERNAL( false, "Block pointers are just pointers, so they should always fit into " @@ -795,16 +799,20 @@ public: "never be invoked."); } - virtual _Rp operator()(_ArgTypes&&... __arg) { return std::__invoke(__f_, std::forward<_ArgTypes>(__arg)...); } + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual _Rp operator()(_ArgTypes&&... 
__arg) { + return std::__invoke(__f_, std::forward<_ArgTypes>(__arg)...); + } # ifndef _LIBCPP_HAS_NO_RTTI - virtual const void* target(type_info const& __ti) const _NOEXCEPT { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual const void* target(type_info const& __ti) const _NOEXCEPT { if (__ti == typeid(__func::__block_type)) return &__f_; return (const void*)nullptr; } - virtual const std::type_info& target_type() const _NOEXCEPT { return typeid(__func::__block_type); } + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual const std::type_info& target_type() const _NOEXCEPT { + return typeid(__func::__block_type); + } # endif // _LIBCPP_HAS_NO_RTTI }; diff --git a/lib/libcxx/include/__functional/hash.h b/lib/libcxx/include/__functional/hash.h index ff22055d69..a9e450edd3 100644 --- a/lib/libcxx/include/__functional/hash.h +++ b/lib/libcxx/include/__functional/hash.h @@ -10,23 +10,18 @@ #define _LIBCPP___FUNCTIONAL_HASH_H #include <__config> -#include <__functional/invoke.h> #include <__functional/unary_function.h> -#include <__fwd/hash.h> -#include <__tuple/sfinae_helpers.h> -#include <__type_traits/is_copy_constructible.h> -#include <__type_traits/is_default_constructible.h> +#include <__fwd/functional.h> +#include <__type_traits/conjunction.h> +#include <__type_traits/invoke.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_enum.h> -#include <__type_traits/is_move_constructible.h> #include <__type_traits/underlying_type.h> -#include <__utility/forward.h> -#include <__utility/move.h> #include <__utility/pair.h> #include <__utility/swap.h> #include #include #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/lib/libcxx/include/__functional/identity.h b/lib/libcxx/include/__functional/identity.h index 7fbfc6c624..8468de3dae 100644 --- a/lib/libcxx/include/__functional/identity.h +++ b/lib/libcxx/include/__functional/identity.h @@ -11,7 +11,7 @@ #define _LIBCPP___FUNCTIONAL_IDENTITY_H #include <__config> -#include <__functional/reference_wrapper.h> +#include <__fwd/functional.h> #include <__type_traits/integral_constant.h> #include <__utility/forward.h> @@ -44,7 +44,7 @@ struct __is_identity > : true_type {}; struct identity { template - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp&& operator()(_Tp&& __t) const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp&& operator()(_Tp&& __t) const noexcept { return std::forward<_Tp>(__t); } diff --git a/lib/libcxx/include/__functional/is_transparent.h b/lib/libcxx/include/__functional/is_transparent.h index 13fc94f71c..b2d62f2e3e 100644 --- a/lib/libcxx/include/__functional/is_transparent.h +++ b/lib/libcxx/include/__functional/is_transparent.h @@ -11,7 +11,6 @@ #define _LIBCPP___FUNCTIONAL_IS_TRANSPARENT #include <__config> -#include <__type_traits/integral_constant.h> #include <__type_traits/void_t.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -23,10 +22,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 14 template -struct __is_transparent : false_type {}; +inline const bool __is_transparent_v = false; template -struct __is_transparent<_Tp, _Up, __void_t > : true_type {}; +inline const bool __is_transparent_v<_Tp, _Up, __void_t > = true; #endif diff --git a/lib/libcxx/include/__functional/mem_fn.h b/lib/libcxx/include/__functional/mem_fn.h index 349a6ce3a7..ee07a71774 100644 --- a/lib/libcxx/include/__functional/mem_fn.h +++ b/lib/libcxx/include/__functional/mem_fn.h @@ -38,8 +38,8 @@ public: template _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX20 - typename __invoke_return::type - operator()(_ArgTypes&&... __args) const { + typename __invoke_return::type + operator()(_ArgTypes&&... __args) const { return std::__invoke(__f_, std::forward<_ArgTypes>(__args)...); } }; diff --git a/lib/libcxx/include/__functional/mem_fun_ref.h b/lib/libcxx/include/__functional/mem_fun_ref.h index fe43c46560..c344420b02 100644 --- a/lib/libcxx/include/__functional/mem_fun_ref.h +++ b/lib/libcxx/include/__functional/mem_fun_ref.h @@ -89,8 +89,8 @@ public: }; template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 const_mem_fun1_t - : public __binary_function { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX11 const_mem_fun1_t : public __binary_function { _Sp (_Tp::*__p_)(_Ap) const; public: diff --git a/lib/libcxx/include/__functional/not_fn.h b/lib/libcxx/include/__functional/not_fn.h index 23a491c135..4b3ce5524a 100644 --- a/lib/libcxx/include/__functional/not_fn.h +++ b/lib/libcxx/include/__functional/not_fn.h @@ -16,7 +16,6 @@ #include <__type_traits/decay.h> #include <__type_traits/enable_if.h> #include <__type_traits/is_constructible.h> -#include <__type_traits/is_move_constructible.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__functional/operations.h b/lib/libcxx/include/__functional/operations.h index 7ddc00650f..0a6320f19d 100644 --- a/lib/libcxx/include/__functional/operations.h +++ b/lib/libcxx/include/__functional/operations.h @@ -13,8 +13,7 @@ #include <__config> #include <__functional/binary_function.h> #include <__functional/unary_function.h> -#include <__type_traits/integral_constant.h> -#include <__type_traits/operation_traits.h> +#include <__type_traits/desugars_to.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -41,18 +40,18 @@ _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(plus); // The non-transparent std::plus specialization is only equivalent to a raw plus // operator when we don't perform an implicit conversion when calling it. 
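The hunks below migrate this trait from class-template specializations (__desugars_to) to the __desugars_to_v variable template, which algorithms consult at compile time to learn that a predicate is just a raw operator in disguise. Reduced to a standalone sketch (the tag name is invented), the pattern looks like this:

```cpp
#include <functional>

struct plus_tag {};

// Primary template: by default, nothing is known to desugar.
template <class Tag, class Op, class Lhs, class Rhs>
inline constexpr bool desugars_to_v = false;

// std::plus<T> called with T, T performs no conversions, so it is a raw `+`.
template <class T>
inline constexpr bool desugars_to_v<plus_tag, std::plus<T>, T, T> = true;

// The transparent std::plus<> never converts, so any operand types qualify.
template <class T, class U>
inline constexpr bool desugars_to_v<plus_tag, std::plus<>, T, U> = true;

static_assert(desugars_to_v<plus_tag, std::plus<int>, int, int>);
static_assert(!desugars_to_v<plus_tag, std::plus<long>, int, int>); // would convert
```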
template -struct __desugars_to<__plus_tag, plus<_Tp>, _Tp, _Tp> : true_type {}; +inline const bool __desugars_to_v<__plus_tag, plus<_Tp>, _Tp, _Tp> = true; template -struct __desugars_to<__plus_tag, plus, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__plus_tag, plus, _Tp, _Up> = true; #if _LIBCPP_STD_VER >= 14 template <> struct _LIBCPP_TEMPLATE_VIS plus { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) + std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) + std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) + std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) + std::forward<_T2>(__u)) { return std::forward<_T1>(__t) + std::forward<_T2>(__u); } typedef void is_transparent; @@ -77,8 +76,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS minus { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) - std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) - std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) - std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) - std::forward<_T2>(__u)) { return std::forward<_T1>(__t) - std::forward<_T2>(__u); } typedef void is_transparent; @@ -103,8 +102,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS multiplies { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) * std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) * std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) * std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) * std::forward<_T2>(__u)) { return std::forward<_T1>(__t) * std::forward<_T2>(__u); } typedef void is_transparent; @@ -129,8 +128,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS divides { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) / std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) / std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) / std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) / std::forward<_T2>(__u)) { return std::forward<_T1>(__t) / std::forward<_T2>(__u); } typedef void is_transparent; @@ -155,8 +154,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS modulus { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) % std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) % std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) % std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) % std::forward<_T2>(__u)) { return std::forward<_T1>(__t) % std::forward<_T2>(__u); } typedef void is_transparent; @@ -179,7 +178,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS negate { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_Tp&& __x) const - noexcept(noexcept(-std::forward<_Tp>(__x))) -> decltype(-std::forward<_Tp>(__x)) { + noexcept(noexcept(-std::forward<_Tp>(__x))) // + -> decltype(-std::forward<_Tp>(__x)) { return -std::forward<_Tp>(__x); } typedef void is_transparent; @@ -206,8 +206,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_and { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const 
- noexcept(noexcept(std::forward<_T1>(__t) & std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) & std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) & + std::forward<_T2>(__u))) -> decltype(std::forward<_T1>(__t) & std::forward<_T2>(__u)) { return std::forward<_T1>(__t) & std::forward<_T2>(__u); } typedef void is_transparent; @@ -225,7 +225,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_not { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_Tp&& __x) const - noexcept(noexcept(~std::forward<_Tp>(__x))) -> decltype(~std::forward<_Tp>(__x)) { + noexcept(noexcept(~std::forward<_Tp>(__x))) // + -> decltype(~std::forward<_Tp>(__x)) { return ~std::forward<_Tp>(__x); } typedef void is_transparent; @@ -250,8 +251,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_or { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) | std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) | std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) | std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) | std::forward<_T2>(__u)) { return std::forward<_T1>(__t) | std::forward<_T2>(__u); } typedef void is_transparent; @@ -276,8 +277,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_xor { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) ^ std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) ^ std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) ^ std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) ^ std::forward<_T2>(__u)) { return std::forward<_T1>(__t) ^ std::forward<_T2>(__u); } typedef void is_transparent; @@ -304,8 +305,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS equal_to { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) == std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) == std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) == std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) == std::forward<_T2>(__u)) { return std::forward<_T1>(__t) == std::forward<_T2>(__u); } typedef void is_transparent; @@ -315,11 +316,11 @@ struct _LIBCPP_TEMPLATE_VIS equal_to { // The non-transparent std::equal_to specialization is only equivalent to a raw equality // comparison when we don't perform an implicit conversion when calling it. 
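To make the implicit-conversion caveat concrete: the non-transparent form narrows its arguments, so it can disagree with a raw == on the original operands. This sketch assumes a 64-bit long; out-of-range narrowing to int is modular since C++20 and implementation-defined before that.

```cpp
#include <cassert>
#include <functional>

int main() {
  long a = 0x1'0000'0000L; // needs 33 bits; assumes a 64-bit long
  long b = 0;

  assert(a != b);                     // raw == sees different values
  assert(std::equal_to<int>()(a, b)); // both arguments narrow to int(0): "equal"
  assert(!std::equal_to<>()(a, b));   // the transparent form compares the longs
}
```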
template -struct __desugars_to<__equal_tag, equal_to<_Tp>, _Tp, _Tp> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, equal_to<_Tp>, _Tp, _Tp> = true; // In the transparent case, we do not enforce that template -struct __desugars_to<__equal_tag, equal_to, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, equal_to, _Tp, _Up> = true; #if _LIBCPP_STD_VER >= 14 template @@ -339,8 +340,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS not_equal_to { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) != std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) != std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) != std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) != std::forward<_T2>(__u)) { return std::forward<_T1>(__t) != std::forward<_T2>(__u); } typedef void is_transparent; @@ -360,17 +361,23 @@ struct _LIBCPP_TEMPLATE_VIS less : __binary_function<_Tp, _Tp, bool> { }; _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(less); +template +inline const bool __desugars_to_v<__less_tag, less<_Tp>, _Tp, _Tp> = true; + #if _LIBCPP_STD_VER >= 14 template <> struct _LIBCPP_TEMPLATE_VIS less { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) < std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) < std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) < std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) < std::forward<_T2>(__u)) { return std::forward<_T1>(__t) < std::forward<_T2>(__u); } typedef void is_transparent; }; + +template +inline const bool __desugars_to_v<__less_tag, less<>, _Tp, _Tp> = true; #endif #if _LIBCPP_STD_VER >= 14 @@ -391,8 +398,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS less_equal { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) <= std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) <= std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) <= std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) <= std::forward<_T2>(__u)) { return std::forward<_T1>(__t) <= std::forward<_T2>(__u); } typedef void is_transparent; @@ -417,8 +424,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS greater_equal { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) >= std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) >= std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) >= + std::forward<_T2>(__u))) -> decltype(std::forward<_T1>(__t) >= std::forward<_T2>(__u)) { return std::forward<_T1>(__t) >= std::forward<_T2>(__u); } typedef void is_transparent; @@ -443,8 +450,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS greater { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) > std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) > std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) > std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) > std::forward<_T2>(__u)) { return std::forward<_T1>(__t) > std::forward<_T2>(__u); } typedef void is_transparent; @@ -471,8 +478,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS logical_and { template 
_LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) && std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) && std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) && std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) && std::forward<_T2>(__u)) { return std::forward<_T1>(__t) && std::forward<_T2>(__u); } typedef void is_transparent; @@ -495,7 +502,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS logical_not { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_Tp&& __x) const - noexcept(noexcept(!std::forward<_Tp>(__x))) -> decltype(!std::forward<_Tp>(__x)) { + noexcept(noexcept(!std::forward<_Tp>(__x))) // + -> decltype(!std::forward<_Tp>(__x)) { return !std::forward<_Tp>(__x); } typedef void is_transparent; @@ -520,8 +528,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS logical_or { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) || std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) || std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) || std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) || std::forward<_T2>(__u)) { return std::forward<_T1>(__t) || std::forward<_T2>(__u); } typedef void is_transparent; diff --git a/lib/libcxx/include/__functional/pointer_to_binary_function.h b/lib/libcxx/include/__functional/pointer_to_binary_function.h index 51a7c3fe0f..e345250dcd 100644 --- a/lib/libcxx/include/__functional/pointer_to_binary_function.h +++ b/lib/libcxx/include/__functional/pointer_to_binary_function.h @@ -22,8 +22,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_BINDERS) template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 pointer_to_binary_function - : public __binary_function<_Arg1, _Arg2, _Result> { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX11 pointer_to_binary_function : public __binary_function<_Arg1, _Arg2, _Result> { _Result (*__f_)(_Arg1, _Arg2); public: diff --git a/lib/libcxx/include/__functional/pointer_to_unary_function.h b/lib/libcxx/include/__functional/pointer_to_unary_function.h index 0338e76717..3a5d153d36 100644 --- a/lib/libcxx/include/__functional/pointer_to_unary_function.h +++ b/lib/libcxx/include/__functional/pointer_to_unary_function.h @@ -22,8 +22,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_BINDERS) template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 pointer_to_unary_function - : public __unary_function<_Arg, _Result> { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX11 pointer_to_unary_function : public __unary_function<_Arg, _Result> { _Result (*__f_)(_Arg); public: diff --git a/lib/libcxx/include/__functional/ranges_operations.h b/lib/libcxx/include/__functional/ranges_operations.h index 38b2801804..27f06eadd0 100644 --- a/lib/libcxx/include/__functional/ranges_operations.h +++ b/lib/libcxx/include/__functional/ranges_operations.h @@ -13,8 +13,7 @@ #include <__concepts/equality_comparable.h> #include <__concepts/totally_ordered.h> #include <__config> -#include <__type_traits/integral_constant.h> -#include <__type_traits/operation_traits.h> +#include <__type_traits/desugars_to.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -98,7 +97,10 @@ struct greater_equal { // For ranges we do not require 
that the types on each side of the equality // operator are of the same type template -struct __desugars_to<__equal_tag, ranges::equal_to, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, ranges::equal_to, _Tp, _Up> = true; + +template +inline const bool __desugars_to_v<__less_tag, ranges::less, _Tp, _Up> = true; #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__functional/reference_wrapper.h b/lib/libcxx/include/__functional/reference_wrapper.h index 54de06a887..3570e2673c 100644 --- a/lib/libcxx/include/__functional/reference_wrapper.h +++ b/lib/libcxx/include/__functional/reference_wrapper.h @@ -10,12 +10,16 @@ #ifndef _LIBCPP___FUNCTIONAL_REFERENCE_WRAPPER_H #define _LIBCPP___FUNCTIONAL_REFERENCE_WRAPPER_H +#include <__compare/synth_three_way.h> +#include <__concepts/boolean_testable.h> #include <__config> #include <__functional/invoke.h> #include <__functional/weak_result_type.h> #include <__memory/addressof.h> #include <__type_traits/enable_if.h> +#include <__type_traits/is_const.h> #include <__type_traits/remove_cvref.h> +#include <__type_traits/void_t.h> #include <__utility/declval.h> #include <__utility/forward.h> @@ -35,12 +39,12 @@ private: type* __f_; static void __fun(_Tp&) _NOEXCEPT; - static void __fun(_Tp&&) = delete; + static void __fun(_Tp&&) = delete; // NOLINT(modernize-use-equals-delete) ; This is llvm.org/PR54276 public: - template < - class _Up, - class = __enable_if_t::value, decltype(__fun(std::declval<_Up>())) > > + template ()))>, + __enable_if_t::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 reference_wrapper(_Up&& __u) _NOEXCEPT_(noexcept(__fun(std::declval<_Up>()))) { type& __f = static_cast<_Up&&>(__u); @@ -63,6 +67,54 @@ public: { return std::__invoke(get(), std::forward<_ArgTypes>(__args)...); } + +#if _LIBCPP_STD_VER >= 26 + + // [refwrap.comparisons], comparisons + + _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(reference_wrapper __x, reference_wrapper __y) + requires requires { + { __x.get() == __y.get() } -> __boolean_testable; + } + { + return __x.get() == __y.get(); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(reference_wrapper __x, const _Tp& __y) + requires requires { + { __x.get() == __y } -> __boolean_testable; + } + { + return __x.get() == __y; + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(reference_wrapper __x, reference_wrapper __y) + requires(!is_const_v<_Tp>) && requires { + { __x.get() == __y.get() } -> __boolean_testable; + } + { + return __x.get() == __y.get(); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(reference_wrapper __x, reference_wrapper __y) + requires requires { std::__synth_three_way(__x.get(), __y.get()); } + { + return std::__synth_three_way(__x.get(), __y.get()); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(reference_wrapper __x, const _Tp& __y) + requires requires { std::__synth_three_way(__x.get(), __y); } + { + return std::__synth_three_way(__x.get(), __y); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(reference_wrapper __x, reference_wrapper __y) + requires(!is_const_v<_Tp>) && requires { std::__synth_three_way(__x.get(), __y.get()); } + { + return std::__synth_three_way(__x.get(), __y.get()); + } + +#endif // _LIBCPP_STD_VER >= 26 }; #if _LIBCPP_STD_VER >= 17 diff --git a/lib/libcxx/include/__functional/unary_negate.h b/lib/libcxx/include/__functional/unary_negate.h index d130b7d728..5bd487a97b 100644 --- a/lib/libcxx/include/__functional/unary_negate.h +++ 
b/lib/libcxx/include/__functional/unary_negate.h @@ -22,8 +22,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_NEGATORS) template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX17 unary_negate - : public __unary_function { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX17 unary_negate : public __unary_function { _Predicate __pred_; public: diff --git a/lib/libcxx/include/__fwd/array.h b/lib/libcxx/include/__fwd/array.h index 9a79effb61..b429d0c5a9 100644 --- a/lib/libcxx/include/__fwd/array.h +++ b/lib/libcxx/include/__fwd/array.h @@ -21,6 +21,26 @@ _LIBCPP_BEGIN_NAMESPACE_STD template struct _LIBCPP_TEMPLATE_VIS array; +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp& get(array<_Tp, _Size>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& get(const array<_Tp, _Size>&) _NOEXCEPT; + +#ifndef _LIBCPP_CXX03_LANG +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp&& get(array<_Tp, _Size>&&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&& get(const array<_Tp, _Size>&&) _NOEXCEPT; +#endif + +template +struct __is_std_array : false_type {}; + +template +struct __is_std_array > : true_type {}; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___FWD_ARRAY_H diff --git a/lib/libcxx/include/__fwd/complex.h b/lib/libcxx/include/__fwd/complex.h new file mode 100644 index 0000000000..22c78c5cc3 --- /dev/null +++ b/lib/libcxx/include/__fwd/complex.h @@ -0,0 +1,42 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_COMPLEX_H +#define _LIBCPP___FWD_COMPLEX_H + +#include <__config> +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template +class _LIBCPP_TEMPLATE_VIS complex; + +#if _LIBCPP_STD_VER >= 26 + +template +_LIBCPP_HIDE_FROM_ABI constexpr _Tp& get(complex<_Tp>&) noexcept; + +template +_LIBCPP_HIDE_FROM_ABI constexpr _Tp&& get(complex<_Tp>&&) noexcept; + +template +_LIBCPP_HIDE_FROM_ABI constexpr const _Tp& get(const complex<_Tp>&) noexcept; + +template +_LIBCPP_HIDE_FROM_ABI constexpr const _Tp&& get(const complex<_Tp>&&) noexcept; + +#endif // _LIBCPP_STD_VER >= 26 + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_COMPLEX_H diff --git a/lib/libcxx/include/__fwd/deque.h b/lib/libcxx/include/__fwd/deque.h new file mode 100644 index 0000000000..fd2fb5bb4b --- /dev/null +++ b/lib/libcxx/include/__fwd/deque.h @@ -0,0 +1,26 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_DEQUE_H +#define _LIBCPP___FWD_DEQUE_H + +#include <__config> +#include <__fwd/memory.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS deque; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_DEQUE_H diff --git a/lib/libcxx/include/__format/format_fwd.h b/lib/libcxx/include/__fwd/format.h similarity index 86% rename from lib/libcxx/include/__format/format_fwd.h rename to lib/libcxx/include/__fwd/format.h index 120b2fc8d4..b30c220f8a 100644 --- a/lib/libcxx/include/__format/format_fwd.h +++ b/lib/libcxx/include/__fwd/format.h @@ -7,10 +7,9 @@ // //===----------------------------------------------------------------------===// -#ifndef _LIBCPP___FORMAT_FORMAT_FWD_H -#define _LIBCPP___FORMAT_FORMAT_FWD_H +#ifndef _LIBCPP___FWD_FORMAT_H +#define _LIBCPP___FWD_FORMAT_H -#include <__availability> #include <__config> #include <__iterator/concepts.h> @@ -36,4 +35,4 @@ struct _LIBCPP_TEMPLATE_VIS formatter; _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP___FORMAT_FORMAT_FWD_H +#endif // _LIBCPP___FWD_FORMAT_H diff --git a/lib/libcxx/include/__fwd/functional.h b/lib/libcxx/include/__fwd/functional.h new file mode 100644 index 0000000000..32c9ef33e4 --- /dev/null +++ b/lib/libcxx/include/__fwd/functional.h @@ -0,0 +1,28 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_FUNCTIONAL_H +#define _LIBCPP___FWD_FUNCTIONAL_H + +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template +struct _LIBCPP_TEMPLATE_VIS hash; + +template +class _LIBCPP_TEMPLATE_VIS reference_wrapper; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_FUNCTIONAL_H diff --git a/lib/libcxx/include/__fwd/get.h b/lib/libcxx/include/__fwd/get.h deleted file mode 100644 index e7261b8269..0000000000 --- a/lib/libcxx/include/__fwd/get.h +++ /dev/null @@ -1,99 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___FWD_GET_H -#define _LIBCPP___FWD_GET_H - -#include <__concepts/copyable.h> -#include <__config> -#include <__fwd/array.h> -#include <__fwd/pair.h> -#include <__fwd/subrange.h> -#include <__fwd/tuple.h> -#include <__tuple/tuple_element.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_BEGIN_NAMESPACE_STD - -#ifndef _LIBCPP_CXX03_LANG - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type& -get(tuple<_Tp...>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type& -get(const tuple<_Tp...>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type&& -get(tuple<_Tp...>&&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type&& -get(const tuple<_Tp...>&&) _NOEXCEPT; - -#endif //_LIBCPP_CXX03_LANG - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type& -get(pair<_T1, _T2>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type& -get(const pair<_T1, _T2>&) _NOEXCEPT; - -#ifndef _LIBCPP_CXX03_LANG -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type&& -get(pair<_T1, _T2>&&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type&& -get(const pair<_T1, _T2>&&) _NOEXCEPT; -#endif - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp& get(array<_Tp, _Size>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& get(const array<_Tp, _Size>&) _NOEXCEPT; - -#ifndef _LIBCPP_CXX03_LANG -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp&& get(array<_Tp, _Size>&&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&& get(const array<_Tp, _Size>&&) _NOEXCEPT; -#endif - -#if _LIBCPP_STD_VER >= 20 - -namespace ranges { - -template - requires((_Index == 0 && copyable<_Iter>) || _Index == 1) -_LIBCPP_HIDE_FROM_ABI constexpr auto get(const subrange<_Iter, _Sent, _Kind>& __subrange); - -template - requires(_Index < 2) -_LIBCPP_HIDE_FROM_ABI constexpr auto get(subrange<_Iter, _Sent, _Kind>&& __subrange); - -} // namespace ranges - -using ranges::get; - -#endif // _LIBCPP_STD_VER >= 20 - -_LIBCPP_END_NAMESPACE_STD - -#endif // _LIBCPP___FWD_GET_H diff --git a/lib/libcxx/include/__fwd/ios.h b/lib/libcxx/include/__fwd/ios.h index 82c865d58c..48350709d4 100644 --- a/lib/libcxx/include/__fwd/ios.h +++ b/lib/libcxx/include/__fwd/ios.h @@ -18,6 +18,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD +class _LIBCPP_EXPORTED_FROM_ABI ios_base; + template > class _LIBCPP_TEMPLATE_VIS basic_ios; diff --git a/lib/libcxx/include/__fwd/hash.h b/lib/libcxx/include/__fwd/memory.h similarity index 77% rename from lib/libcxx/include/__fwd/hash.h rename to lib/libcxx/include/__fwd/memory.h index af9eca876a..b9e151855a 100644 --- a/lib/libcxx/include/__fwd/hash.h +++ b/lib/libcxx/include/__fwd/memory.h @@ -6,8 +6,8 @@ // 
//===---------------------------------------------------------------------===// -#ifndef _LIBCPP___FWD_HASH_H -#define _LIBCPP___FWD_HASH_H +#ifndef _LIBCPP___FWD_MEMORY_H +#define _LIBCPP___FWD_MEMORY_H #include <__config> @@ -17,9 +17,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -struct _LIBCPP_TEMPLATE_VIS hash; +template +class _LIBCPP_TEMPLATE_VIS allocator; _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP___FWD_HASH_H +#endif // _LIBCPP___FWD_MEMORY_H diff --git a/lib/libcxx/include/__fwd/memory_resource.h b/lib/libcxx/include/__fwd/memory_resource.h index 03b78ad2bd..d68b2c2b63 100644 --- a/lib/libcxx/include/__fwd/memory_resource.h +++ b/lib/libcxx/include/__fwd/memory_resource.h @@ -9,7 +9,6 @@ #ifndef _LIBCPP___FWD_MEMORY_RESOURCE_H #define _LIBCPP___FWD_MEMORY_RESOURCE_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__fwd/pair.h b/lib/libcxx/include/__fwd/pair.h index 3844014de3..af32628fe1 100644 --- a/lib/libcxx/include/__fwd/pair.h +++ b/lib/libcxx/include/__fwd/pair.h @@ -10,6 +10,8 @@ #define _LIBCPP___FWD_PAIR_H #include <__config> +#include <__fwd/tuple.h> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -20,6 +22,24 @@ _LIBCPP_BEGIN_NAMESPACE_STD template struct _LIBCPP_TEMPLATE_VIS pair; +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type& +get(pair<_T1, _T2>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type& +get(const pair<_T1, _T2>&) _NOEXCEPT; + +#ifndef _LIBCPP_CXX03_LANG +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type&& +get(pair<_T1, _T2>&&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type&& +get(const pair<_T1, _T2>&&) _NOEXCEPT; +#endif + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___FWD_PAIR_H diff --git a/lib/libcxx/include/__fwd/queue.h b/lib/libcxx/include/__fwd/queue.h new file mode 100644 index 0000000000..50d99ad9c2 --- /dev/null +++ b/lib/libcxx/include/__fwd/queue.h @@ -0,0 +1,31 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_QUEUE_H +#define _LIBCPP___FWD_QUEUE_H + +#include <__config> +#include <__functional/operations.h> +#include <__fwd/deque.h> +#include <__fwd/vector.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS queue; + +template , class _Compare = less > +class _LIBCPP_TEMPLATE_VIS priority_queue; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_QUEUE_H diff --git a/lib/libcxx/include/__fwd/sstream.h b/lib/libcxx/include/__fwd/sstream.h index e2d46fbe1d..39a9c3faf1 100644 --- a/lib/libcxx/include/__fwd/sstream.h +++ b/lib/libcxx/include/__fwd/sstream.h @@ -10,6 +10,7 @@ #define _LIBCPP___FWD_SSTREAM_H #include <__config> +#include <__fwd/memory.h> #include <__fwd/string.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__fwd/stack.h b/lib/libcxx/include/__fwd/stack.h new file mode 100644 index 0000000000..7dab6c1a4f --- /dev/null +++ b/lib/libcxx/include/__fwd/stack.h @@ -0,0 +1,26 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_STACK_H +#define _LIBCPP___FWD_STACK_H + +#include <__config> +#include <__fwd/deque.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS stack; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_STACK_H diff --git a/lib/libcxx/include/__fwd/string.h b/lib/libcxx/include/__fwd/string.h index 032132374d..2418e1f9b2 100644 --- a/lib/libcxx/include/__fwd/string.h +++ b/lib/libcxx/include/__fwd/string.h @@ -9,8 +9,8 @@ #ifndef _LIBCPP___FWD_STRING_H #define _LIBCPP___FWD_STRING_H -#include <__availability> #include <__config> +#include <__fwd/memory.h> #include <__fwd/memory_resource.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -39,9 +39,6 @@ template <> struct char_traits; #endif -template -class _LIBCPP_TEMPLATE_VIS allocator; - template , class _Allocator = allocator<_CharT> > class _LIBCPP_TEMPLATE_VIS basic_string; diff --git a/lib/libcxx/include/__fwd/subrange.h b/lib/libcxx/include/__fwd/subrange.h index d09b9b1c5b..60a41da23d 100644 --- a/lib/libcxx/include/__fwd/subrange.h +++ b/lib/libcxx/include/__fwd/subrange.h @@ -9,7 +9,10 @@ #ifndef _LIBCPP___FWD_SUBRANGE_H #define _LIBCPP___FWD_SUBRANGE_H +#include <__concepts/copyable.h> #include <__config> +#include <__iterator/concepts.h> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -17,8 +20,6 @@ #if _LIBCPP_STD_VER >= 20 -# include <__iterator/concepts.h> - _LIBCPP_BEGIN_NAMESPACE_STD namespace ranges { @@ -29,8 +30,18 @@ template _Sent, subrange_ki requires(_Kind == subrange_kind::sized || !sized_sentinel_for<_Sent, _Iter>) class _LIBCPP_TEMPLATE_VIS subrange; +template + requires((_Index == 0 && copyable<_Iter>) || _Index == 1) +_LIBCPP_HIDE_FROM_ABI constexpr auto get(const subrange<_Iter, _Sent, _Kind>&); + +template + requires(_Index < 2) +_LIBCPP_HIDE_FROM_ABI constexpr auto 
get(subrange<_Iter, _Sent, _Kind>&&); + } // namespace ranges +using ranges::get; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__fwd/tuple.h b/lib/libcxx/include/__fwd/tuple.h index 16b3fabbb9..902770c295 100644 --- a/lib/libcxx/include/__fwd/tuple.h +++ b/lib/libcxx/include/__fwd/tuple.h @@ -10,6 +10,7 @@ #define _LIBCPP___FWD_TUPLE_H #include <__config> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -17,11 +18,33 @@ _LIBCPP_BEGIN_NAMESPACE_STD +template +struct _LIBCPP_TEMPLATE_VIS tuple_element; + #ifndef _LIBCPP_CXX03_LANG template class _LIBCPP_TEMPLATE_VIS tuple; +template +struct _LIBCPP_TEMPLATE_VIS tuple_size; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type& +get(tuple<_Tp...>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type& +get(const tuple<_Tp...>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type&& +get(tuple<_Tp...>&&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type&& +get(const tuple<_Tp...>&&) _NOEXCEPT; + #endif // _LIBCPP_CXX03_LANG _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__fwd/vector.h b/lib/libcxx/include/__fwd/vector.h new file mode 100644 index 0000000000..c9cc961374 --- /dev/null +++ b/lib/libcxx/include/__fwd/vector.h @@ -0,0 +1,26 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_VECTOR_H +#define _LIBCPP___FWD_VECTOR_H + +#include <__config> +#include <__fwd/memory.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS vector; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_VECTOR_H diff --git a/lib/libcxx/include/__hash_table b/lib/libcxx/include/__hash_table index 1342001200..0257585285 100644 --- a/lib/libcxx/include/__hash_table +++ b/lib/libcxx/include/__hash_table @@ -28,12 +28,9 @@ #include <__type_traits/can_extract_key.h> #include <__type_traits/conditional.h> #include <__type_traits/is_const.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> +#include <__type_traits/is_nothrow_assignable.h> #include <__type_traits/is_nothrow_constructible.h> -#include <__type_traits/is_nothrow_copy_constructible.h> -#include <__type_traits/is_nothrow_default_constructible.h> -#include <__type_traits/is_nothrow_move_assignable.h> -#include <__type_traits/is_nothrow_move_constructible.h> #include <__type_traits/is_pointer.h> #include <__type_traits/is_reference.h> #include <__type_traits/is_swappable.h> @@ -243,9 +240,9 @@ public: private: static_assert(!is_const<__node_type>::value, "_NodePtr should never be a pointer to const"); - static_assert((is_same::element_type, void>::value), + static_assert(is_same::element_type, void>::value, "_VoidPtr does not point to unqualified void type"); - static_assert((is_same<__rebind_pointer_t<_VoidPtr, __node_type>, _NodePtr>::value), + static_assert(is_same<__rebind_pointer_t<_VoidPtr, __node_type>, _NodePtr>::value, "_VoidPtr does not rebind to _NodePtr."); }; @@ -284,13 +281,21 @@ public: _LIBCPP_HIDE_FROM_ABI __hash_iterator() _NOEXCEPT : __node_(nullptr) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container iterator"); __node_ = __node_->__next_; return *this; } @@ -345,12 +350,20 @@ public: _LIBCPP_HIDE_FROM_ABI __hash_const_iterator(const __non_const_iterator& __x) _NOEXCEPT : __node_(__x.__node_) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI 
__hash_const_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container const_iterator"); __node_ = __node_->__next_; return *this; } @@ -400,13 +413,21 @@ public: _LIBCPP_HIDE_FROM_ABI __hash_local_iterator() _NOEXCEPT : __node_(nullptr) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container local_iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container local_iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_local_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container local_iterator"); __node_ = __node_->__next_; if (__node_ != nullptr && std::__constrain_hash(__node_->__hash(), __bucket_count_) != __bucket_) __node_ = nullptr; @@ -475,13 +496,21 @@ public: __bucket_(__x.__bucket_), __bucket_count_(__x.__bucket_count_) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_local_iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_local_iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_const_local_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container const_local_iterator"); __node_ = __node_->__next_; if (__node_ != nullptr && std::__constrain_hash(__node_->__hash(), __bucket_count_) != __bucket_) __node_ = nullptr; @@ -671,11 +700,11 @@ private: // check for sane allocator pointer rebinding semantics. Rebinding the // allocator for a new pointer type should be exactly the same as rebinding // the pointer using 'pointer_traits'. 
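The pair of static_asserts below encode the rule this comment describes. As a standalone illustration (all names here are hypothetical, not libc++ internals), the invariant being checked is that rebinding the allocator and then taking its pointer type agrees with rebinding the pointer type directly through pointer_traits:

#include <memory>
#include <type_traits>

struct Node { int value; };

// Sketch of the sanity check: allocator rebinding and pointer rebinding must agree.
template <class Alloc, class N>
constexpr bool sane_pointer_rebinding() {
  // Rebind the allocator to N and ask allocator_traits for the resulting pointer type...
  using NodeAlloc = typename std::allocator_traits<Alloc>::template rebind_alloc<N>;
  using AllocPtr  = typename std::allocator_traits<NodeAlloc>::pointer;
  // ...and rebind the allocator's original pointer type directly via pointer_traits.
  using RawPtr    = typename std::allocator_traits<Alloc>::pointer;
  using RebindPtr = typename std::pointer_traits<RawPtr>::template rebind<N>;
  return std::is_same<AllocPtr, RebindPtr>::value;
}

static_assert(sane_pointer_rebinding<std::allocator<int>, Node>(),
              "Allocator does not rebind pointers in a sane manner.");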
- static_assert((is_same<__node_pointer, typename __node_traits::pointer>::value), + static_assert(is_same<__node_pointer, typename __node_traits::pointer>::value, "Allocator does not rebind pointers in a sane manner."); typedef __rebind_alloc<__node_traits, __first_node> __node_base_allocator; typedef allocator_traits<__node_base_allocator> __node_base_traits; - static_assert((is_same<__node_base_pointer, typename __node_base_traits::pointer>::value), + static_assert(is_same<__node_base_pointer, typename __node_base_traits::pointer>::value, "Allocator does not rebind pointers in a sane manner."); private: @@ -802,7 +831,7 @@ public: return __emplace_unique_key_args(_NodeTypes::__get_key(__x), std::move(__x)); } - template ::value> > + template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI pair __insert_unique(_Pp&& __x) { return __emplace_unique(std::forward<_Pp>(__x)); } @@ -899,13 +928,12 @@ public: _LIBCPP_HIDE_FROM_ABI void swap(__hash_table& __u) #if _LIBCPP_STD_VER <= 11 - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value && + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v && (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value || - __is_nothrow_swappable<__pointer_allocator>::value) && - (!__node_traits::propagate_on_container_swap::value || - __is_nothrow_swappable<__node_allocator>::value)); + __is_nothrow_swappable_v<__pointer_allocator>) && + (!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable_v<__node_allocator>)); #else - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value); + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v); #endif _LIBCPP_HIDE_FROM_ABI size_type max_bucket_count() const _NOEXCEPT { return max_size(); } @@ -1072,8 +1100,8 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u, const template __hash_table<_Tp, _Hash, _Equal, _Alloc>::~__hash_table() { #if defined(_LIBCPP_CXX03_LANG) - static_assert((is_copy_constructible::value), "Predicate must be copy-constructible."); - static_assert((is_copy_constructible::value), "Hasher must be copy-constructible."); + static_assert(is_copy_constructible::value, "Predicate must be copy-constructible."); + static_assert(is_copy_constructible::value, "Hasher must be copy-constructible."); #endif __deallocate_node(__p1_.first().__next_); @@ -1199,7 +1227,7 @@ template void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_unique(_InputIterator __first, _InputIterator __last) { typedef iterator_traits<_InputIterator> _ITraits; typedef typename _ITraits::value_type _ItValueType; - static_assert((is_same<_ItValueType, __container_value_type>::value), + static_assert(is_same<_ItValueType, __container_value_type>::value, "__assign_unique may only be called with the containers value type"); if (bucket_count() != 0) { @@ -1956,12 +1984,12 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_multi(const _Key& __k) c template void __hash_table<_Tp, _Hash, _Equal, _Alloc>::swap(__hash_table& __u) #if _LIBCPP_STD_VER <= 11 - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value && + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v && (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value || - __is_nothrow_swappable<__pointer_allocator>::value) && - (!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable<__node_allocator>::value)) + __is_nothrow_swappable_v<__pointer_allocator>) && + 
(!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable_v<__node_allocator>)) #else - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value) + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v) #endif { _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR( diff --git a/lib/libcxx/include/__iterator/access.h b/lib/libcxx/include/__iterator/access.h index 5c6090eeb4..acc4f60bf6 100644 --- a/lib/libcxx/include/__iterator/access.h +++ b/lib/libcxx/include/__iterator/access.h @@ -54,8 +54,8 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 auto end(const _Cp& __c) -> # if _LIBCPP_STD_VER >= 14 template -_LIBCPP_HIDE_FROM_ABI constexpr auto cbegin(const _Cp& __c) noexcept(noexcept(std::begin(__c))) - -> decltype(std::begin(__c)) { +_LIBCPP_HIDE_FROM_ABI constexpr auto +cbegin(const _Cp& __c) noexcept(noexcept(std::begin(__c))) -> decltype(std::begin(__c)) { return std::begin(__c); } diff --git a/lib/libcxx/include/__iterator/advance.h b/lib/libcxx/include/__iterator/advance.h index 73473f899e..296db1aaab 100644 --- a/lib/libcxx/include/__iterator/advance.h +++ b/lib/libcxx/include/__iterator/advance.h @@ -61,7 +61,7 @@ __advance(_RandIter& __i, typename iterator_traits<_RandIter>::difference_type _ template < class _InputIter, class _Distance, class _IntegralDistance = decltype(std::__convert_to_integral(std::declval<_Distance>())), - class = __enable_if_t::value> > + __enable_if_t::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 void advance(_InputIter& __i, _Distance __orig_n) { typedef typename iterator_traits<_InputIter>::difference_type _Difference; _Difference __n = static_cast<_Difference>(std::__convert_to_integral(__orig_n)); @@ -170,14 +170,14 @@ public: } else { // Otherwise, if `n` is non-negative, while `bool(i != bound_sentinel)` is true, increments `i` but at // most `n` times. - while (__i != __bound_sentinel && __n > 0) { + while (__n > 0 && __i != __bound_sentinel) { ++__i; --__n; } // Otherwise, while `bool(i != bound_sentinel)` is true, decrements `i` but at most `-n` times. if constexpr (bidirectional_iterator<_Ip> && same_as<_Ip, _Sp>) { - while (__i != __bound_sentinel && __n < 0) { + while (__n < 0 && __i != __bound_sentinel) { --__i; ++__n; } diff --git a/lib/libcxx/include/__iterator/aliasing_iterator.h b/lib/libcxx/include/__iterator/aliasing_iterator.h new file mode 100644 index 0000000000..94ba577078 --- /dev/null +++ b/lib/libcxx/include/__iterator/aliasing_iterator.h @@ -0,0 +1,127 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ITERATOR_ALIASING_ITERATOR_H +#define _LIBCPP___ITERATOR_ALIASING_ITERATOR_H + +#include <__config> +#include <__iterator/iterator_traits.h> +#include <__memory/pointer_traits.h> +#include <__type_traits/is_trivial.h> +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +// This iterator wrapper is used to type-pun an iterator to return a different type. This is done without UB by not +// actually punning the type, but instead inspecting the object representation of the base type and copying that into +// an instance of the alias type. For that reason the alias type has to be trivial. 
The alias is returned as a prvalue +// when dereferencing the iterator, since it is temporary storage. This wrapper is used to vectorize some algorithms. + +_LIBCPP_BEGIN_NAMESPACE_STD + +template <class _BaseIter, class _Alias> +struct __aliasing_iterator_wrapper { + class __iterator { + _BaseIter __base_ = nullptr; + + using __iter_traits = iterator_traits<_BaseIter>; + using __base_value_type = typename __iter_traits::value_type; + + static_assert(__has_random_access_iterator_category<_BaseIter>::value, + "The base iterator has to be a random access iterator!"); + + public: + using iterator_category = random_access_iterator_tag; + using value_type = _Alias; + using difference_type = ptrdiff_t; + using reference = value_type&; + using pointer = value_type*; + + static_assert(is_trivial<value_type>::value); + static_assert(sizeof(__base_value_type) == sizeof(value_type)); + + _LIBCPP_HIDE_FROM_ABI __iterator() = default; + _LIBCPP_HIDE_FROM_ABI __iterator(_BaseIter __base) _NOEXCEPT : __base_(__base) {} + + _LIBCPP_HIDE_FROM_ABI __iterator& operator++() _NOEXCEPT { + ++__base_; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI __iterator operator++(int) _NOEXCEPT { + __iterator __tmp(*this); + ++__base_; + return __tmp; + } + + _LIBCPP_HIDE_FROM_ABI __iterator& operator--() _NOEXCEPT { + --__base_; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI __iterator operator--(int) _NOEXCEPT { + __iterator __tmp(*this); + --__base_; + return __tmp; + } + + _LIBCPP_HIDE_FROM_ABI friend __iterator operator+(__iterator __iter, difference_type __n) _NOEXCEPT { + return __iterator(__iter.__base_ + __n); + } + + _LIBCPP_HIDE_FROM_ABI friend __iterator operator+(difference_type __n, __iterator __iter) _NOEXCEPT { + return __iterator(__n + __iter.__base_); + } + + _LIBCPP_HIDE_FROM_ABI __iterator& operator+=(difference_type __n) _NOEXCEPT { + __base_ += __n; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI friend __iterator operator-(__iterator __iter, difference_type __n) _NOEXCEPT { + return __iterator(__iter.__base_ - __n); + } + + _LIBCPP_HIDE_FROM_ABI friend difference_type operator-(__iterator __lhs, __iterator __rhs) _NOEXCEPT { + return __lhs.__base_ - __rhs.__base_; + } + + _LIBCPP_HIDE_FROM_ABI __iterator& operator-=(difference_type __n) _NOEXCEPT { + __base_ -= __n; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI _BaseIter __base() const _NOEXCEPT { return __base_; } + + _LIBCPP_HIDE_FROM_ABI _Alias operator*() const _NOEXCEPT { + _Alias __val; + __builtin_memcpy(&__val, std::__to_address(__base_), sizeof(value_type)); + return __val; + } + + _LIBCPP_HIDE_FROM_ABI value_type operator[](difference_type __n) const _NOEXCEPT { return *(*this + __n); } + + _LIBCPP_HIDE_FROM_ABI friend bool operator==(const __iterator& __lhs, const __iterator& __rhs) _NOEXCEPT { + return __lhs.__base_ == __rhs.__base_; + } + + _LIBCPP_HIDE_FROM_ABI friend bool operator!=(const __iterator& __lhs, const __iterator& __rhs) _NOEXCEPT { + return __lhs.__base_ != __rhs.__base_; + } + }; +}; + +// This is required to avoid ADL instantiations on _BaseT +template <class _BaseT, class _Alias> +using __aliasing_iterator = typename __aliasing_iterator_wrapper<_BaseT, _Alias>::__iterator; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___ITERATOR_ALIASING_ITERATOR_H diff --git a/lib/libcxx/include/__iterator/bounded_iter.h b/lib/libcxx/include/__iterator/bounded_iter.h index 2a66764887..8a81c9ffbf 100644 --- a/lib/libcxx/include/__iterator/bounded_iter.h +++ b/lib/libcxx/include/__iterator/bounded_iter.h @@ -11,6 +11,8 @@ #define _LIBCPP___ITERATOR_BOUNDED_ITER_H #include <__assert> +#include
<__compare/ordering.h> +#include <__compare/three_way_comparable.h> #include <__config> #include <__iterator/iterator_traits.h> #include <__memory/pointer_traits.h> @@ -31,13 +33,20 @@ _LIBCPP_BEGIN_NAMESPACE_STD // Iterator wrapper that carries the valid range it is allowed to access. // // This is a simple iterator wrapper for contiguous iterators that points -// within a [begin, end) range and carries these bounds with it. The iterator -// ensures that it is pointing within that [begin, end) range when it is -// dereferenced. +// within a [begin, end] range and carries these bounds with it. The iterator +// ensures that it is pointing within [begin, end) range when it is +// dereferenced. It also ensures that it is never iterated outside of +// [begin, end]. This is important for two reasons: // -// Arithmetic operations are allowed and the bounds of the resulting iterator -// are not checked. Hence, it is possible to create an iterator pointing outside -// its range, but it is not possible to dereference it. +// 1. It allows `operator*` and `operator++` bounds checks to be `iter != end`. +// This is both less for the optimizer to prove, and aligns with how callers +// typically use iterators. +// +// 2. Advancing an iterator out of bounds is undefined behavior (see the table +// in [input.iterators]). In particular, when the underlying iterator is a +// pointer, it is undefined at the language level (see [expr.add]). If +// bounded iterators exhibited this undefined behavior, we risk compiler +// optimizations deleting non-redundant bounds checks. template ::value > > struct __bounded_iter { using value_type = typename iterator_traits<_Iterator>::value_type; @@ -51,14 +60,14 @@ struct __bounded_iter { // Create a singular iterator. // - // Such an iterator does not point to any object and is conceptually out of bounds, so it is - // not dereferenceable. Observing operations like comparison and assignment are valid. + // Such an iterator points past the end of an empty span, so it is not dereferenceable. + // Observing operations like comparison and assignment are valid. _LIBCPP_HIDE_FROM_ABI __bounded_iter() = default; _LIBCPP_HIDE_FROM_ABI __bounded_iter(__bounded_iter const&) = default; _LIBCPP_HIDE_FROM_ABI __bounded_iter(__bounded_iter&&) = default; - template ::value > > + template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __bounded_iter(__bounded_iter<_OtherIterator> const& __other) _NOEXCEPT : __current_(__other.__current_), __begin_(__other.__begin_), @@ -70,18 +79,20 @@ struct __bounded_iter { private: // Create an iterator wrapping the given iterator, and whose bounds are described - // by the provided [begin, end) range. + // by the provided [begin, end] range. // - // This constructor does not check whether the resulting iterator is within its bounds. - // However, it does check that the provided [begin, end) range is a valid range (that - // is, begin <= end). + // The constructor does not check whether the resulting iterator is within its bounds. It is a + // responsibility of the container to ensure that the given bounds are valid. // // Since it is non-standard for iterators to have this constructor, __bounded_iter must // be created via `std::__make_bounded_iter`. 
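A minimal sketch of the new invariant described above (a hypothetical type, not libc++'s class): because construction validates begin <= current <= end and every mutation re-checks the one boundary it could cross, dereference can rely on the cheap `current != end` test alone.

#include <cassert>

template <class T>
class BoundedPtr {
  T* cur_;
  T* begin_;
  T* end_;

public:
  // The container is responsible for passing consistent bounds; we assert them.
  BoundedPtr(T* cur, T* begin, T* end) : cur_(cur), begin_(begin), end_(end) {
    assert(begin_ <= cur_ && "current and begin are inconsistent");
    assert(cur_ <= end_ && "current and end are inconsistent");
  }

  // The invariant begin <= cur <= end makes `cur != end` a sufficient dereference check.
  T& operator*() const {
    assert(cur_ != end_ && "dereferencing an iterator at the end");
    return *cur_;
  }

  // Advancing re-checks the boundary instead of permitting out-of-bounds pointer arithmetic.
  BoundedPtr& operator++() {
    assert(cur_ != end_ && "advancing an iterator past the end");
    ++cur_;
    return *this;
  }

  BoundedPtr& operator--() {
    assert(cur_ != begin_ && "rewinding an iterator past the start");
    --cur_;
    return *this;
  }
};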
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __bounded_iter( - _Iterator __current, _Iterator __begin, _Iterator __end) + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __bounded_iter(_Iterator __current, _Iterator __begin, _Iterator __end) : __current_(__current), __begin_(__begin), __end_(__end) { - _LIBCPP_ASSERT_INTERNAL(__begin <= __end, "__bounded_iter(current, begin, end): [begin, end) is not a valid range"); + _LIBCPP_ASSERT_INTERNAL( + __begin <= __current, "__bounded_iter(current, begin, end): current and begin are inconsistent"); + _LIBCPP_ASSERT_INTERNAL( + __current <= __end, "__bounded_iter(current, begin, end): current and end are inconsistent"); } template @@ -90,30 +101,37 @@ private: public: // Dereference and indexing operations. // - // These operations check that the iterator is dereferenceable, that is within [begin, end). + // These operations check that the iterator is dereferenceable. Since the class invariant is + // that the iterator is always within `[begin, end]`, we only need to check it's not pointing to + // `end`. This is easier for the optimizer because it aligns with the `iter != container.end()` + // checks that typical callers already use (see + // https://github.com/llvm/llvm-project/issues/78829). _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 reference operator*() const _NOEXCEPT { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( - __in_bounds(__current_), "__bounded_iter::operator*: Attempt to dereference an out-of-range iterator"); + __current_ != __end_, "__bounded_iter::operator*: Attempt to dereference an iterator at the end"); return *__current_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pointer operator->() const _NOEXCEPT { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( - __in_bounds(__current_), "__bounded_iter::operator->: Attempt to dereference an out-of-range iterator"); + __current_ != __end_, "__bounded_iter::operator->: Attempt to dereference an iterator at the end"); return std::__to_address(__current_); } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 reference operator[](difference_type __n) const _NOEXCEPT { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( - __in_bounds(__current_ + __n), "__bounded_iter::operator[]: Attempt to index an iterator out-of-range"); + __n >= __begin_ - __current_, "__bounded_iter::operator[]: Attempt to index an iterator past the start"); + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n < __end_ - __current_, "__bounded_iter::operator[]: Attempt to index an iterator at or past the end"); return __current_[__n]; } // Arithmetic operations. // - // These operations do not check that the resulting iterator is within the bounds, since that - // would make it impossible to create a past-the-end iterator. + // These operations check that the iterator remains within `[begin, end]`. 
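One detail worth calling out in the checks that follow: indexing must land on a dereferenceable element, while plain arithmetic may legally produce the past-the-end position, so the upper bound is exclusive for `operator[]` but inclusive for `operator+=`. A plain-pointer sketch of the difference (illustrative helpers, not libc++ code):

#include <cassert>
#include <cstddef>

// operator+= style check: the result may be the past-the-end position.
int* checked_advance(int* cur, int* begin, int* end, std::ptrdiff_t n) {
  assert(n >= begin - cur && "rewinding past the start");
  assert(n <= end - cur && "advancing past the end"); // inclusive upper bound
  return cur + n;
}

// operator[] style check: the result must be dereferenceable.
int& checked_index(int* cur, int* begin, int* end, std::ptrdiff_t n) {
  assert(n >= begin - cur && "indexing past the start");
  assert(n < end - cur && "indexing at or past the end"); // exclusive upper bound
  return cur[n];
}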
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator++() _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __current_ != __end_, "__bounded_iter::operator++: Attempt to advance an iterator past the end"); ++__current_; return *this; } @@ -124,6 +142,8 @@ public: } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator--() _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __current_ != __begin_, "__bounded_iter::operator--: Attempt to rewind an iterator past the start"); --__current_; return *this; } @@ -134,6 +154,10 @@ public: } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator+=(difference_type __n) _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n >= __begin_ - __current_, "__bounded_iter::operator+=: Attempt to rewind an iterator past the start"); + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n <= __end_ - __current_, "__bounded_iter::operator+=: Attempt to advance an iterator past the end"); __current_ += __n; return *this; } @@ -151,6 +175,10 @@ public: } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator-=(difference_type __n) _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n <= __current_ - __begin_, "__bounded_iter::operator-=: Attempt to rewind an iterator past the start"); + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n >= __current_ - __end_, "__bounded_iter::operator-=: Attempt to advance an iterator past the end"); __current_ -= __n; return *this; } @@ -175,10 +203,15 @@ public: operator==(__bounded_iter const& __x, __bounded_iter const& __y) _NOEXCEPT { return __x.__current_ == __y.__current_; } + +#if _LIBCPP_STD_VER <= 17 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR friend bool operator!=(__bounded_iter const& __x, __bounded_iter const& __y) _NOEXCEPT { return __x.__current_ != __y.__current_; } +#endif + + // TODO(mordante) disable these overloads in the LLVM 20 release. _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR friend bool operator<(__bounded_iter const& __x, __bounded_iter const& __y) _NOEXCEPT { return __x.__current_ < __y.__current_; @@ -196,16 +229,30 @@ public: return __x.__current_ >= __y.__current_; } -private: - // Return whether the given iterator is in the bounds of this __bounded_iter. 
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool __in_bounds(_Iterator const& __iter) const { - return __iter >= __begin_ && __iter < __end_; - } +#if _LIBCPP_STD_VER >= 20 + _LIBCPP_HIDE_FROM_ABI constexpr friend strong_ordering + operator<=>(__bounded_iter const& __x, __bounded_iter const& __y) noexcept { + if constexpr (three_way_comparable<_Iterator, strong_ordering>) { + return __x.__current_ <=> __y.__current_; + } else { + if (__x.__current_ < __y.__current_) + return strong_ordering::less; + if (__x.__current_ == __y.__current_) + return strong_ordering::equal; + + return strong_ordering::greater; + } + } +#endif // _LIBCPP_STD_VER >= 20 + +private: template friend struct pointer_traits; + template + friend struct __bounded_iter; _Iterator __current_; // current iterator - _Iterator __begin_, __end_; // valid range represented as [begin, end) + _Iterator __begin_, __end_; // valid range represented as [begin, end] }; template diff --git a/lib/libcxx/include/__iterator/common_iterator.h b/lib/libcxx/include/__iterator/common_iterator.h index 7b3f4610d5..199de2cc73 100644 --- a/lib/libcxx/include/__iterator/common_iterator.h +++ b/lib/libcxx/include/__iterator/common_iterator.h @@ -124,7 +124,7 @@ public: } template - _LIBCPP_HIDE_FROM_ABI decltype(auto) operator->() const + _LIBCPP_HIDE_FROM_ABI auto operator->() const requires indirectly_readable && (requires(const _I2& __i) { __i.operator->(); } || is_reference_v> || constructible_from, iter_reference_t<_I2>>) diff --git a/lib/libcxx/include/__iterator/concepts.h b/lib/libcxx/include/__iterator/concepts.h index afb7b821a9..0a4878308d 100644 --- a/lib/libcxx/include/__iterator/concepts.h +++ b/lib/libcxx/include/__iterator/concepts.h @@ -177,19 +177,19 @@ concept __has_arrow = input_iterator<_Ip> && (is_pointer_v<_Ip> || requires(_Ip template concept indirectly_unary_invocable = indirectly_readable<_It> && copy_constructible<_Fp> && invocable<_Fp&, iter_value_t<_It>&> && - invocable<_Fp&, iter_reference_t<_It>> && invocable<_Fp&, iter_common_reference_t<_It>> && + invocable<_Fp&, iter_reference_t<_It>> && common_reference_with< invoke_result_t<_Fp&, iter_value_t<_It>&>, invoke_result_t<_Fp&, iter_reference_t<_It>>>; template concept indirectly_regular_unary_invocable = indirectly_readable<_It> && copy_constructible<_Fp> && regular_invocable<_Fp&, iter_value_t<_It>&> && - regular_invocable<_Fp&, iter_reference_t<_It>> && regular_invocable<_Fp&, iter_common_reference_t<_It>> && + regular_invocable<_Fp&, iter_reference_t<_It>> && common_reference_with< invoke_result_t<_Fp&, iter_value_t<_It>&>, invoke_result_t<_Fp&, iter_reference_t<_It>>>; template concept indirect_unary_predicate = indirectly_readable<_It> && copy_constructible<_Fp> && predicate<_Fp&, iter_value_t<_It>&> && - predicate<_Fp&, iter_reference_t<_It>> && predicate<_Fp&, iter_common_reference_t<_It>>; + predicate<_Fp&, iter_reference_t<_It>>; template concept indirect_binary_predicate = @@ -197,8 +197,7 @@ concept indirect_binary_predicate = predicate<_Fp&, iter_value_t<_It1>&, iter_value_t<_It2>&> && predicate<_Fp&, iter_value_t<_It1>&, iter_reference_t<_It2>> && predicate<_Fp&, iter_reference_t<_It1>, iter_value_t<_It2>&> && - predicate<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>> && - predicate<_Fp&, iter_common_reference_t<_It1>, iter_common_reference_t<_It2>>; + predicate<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>>; template concept indirect_equivalence_relation = @@ -206,8 +205,7 @@ concept indirect_equivalence_relation = equivalence_relation<_Fp&, 
iter_value_t<_It1>&, iter_value_t<_It2>&> && equivalence_relation<_Fp&, iter_value_t<_It1>&, iter_reference_t<_It2>> && equivalence_relation<_Fp&, iter_reference_t<_It1>, iter_value_t<_It2>&> && - equivalence_relation<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>> && - equivalence_relation<_Fp&, iter_common_reference_t<_It1>, iter_common_reference_t<_It2>>; + equivalence_relation<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>>; template concept indirect_strict_weak_order = @@ -215,8 +213,7 @@ concept indirect_strict_weak_order = strict_weak_order<_Fp&, iter_value_t<_It1>&, iter_value_t<_It2>&> && strict_weak_order<_Fp&, iter_value_t<_It1>&, iter_reference_t<_It2>> && strict_weak_order<_Fp&, iter_reference_t<_It1>, iter_value_t<_It2>&> && - strict_weak_order<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>> && - strict_weak_order<_Fp&, iter_common_reference_t<_It1>, iter_common_reference_t<_It2>>; + strict_weak_order<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>>; template requires(indirectly_readable<_Its> && ...) && invocable<_Fp, iter_reference_t<_Its>...> diff --git a/lib/libcxx/include/__iterator/counted_iterator.h b/lib/libcxx/include/__iterator/counted_iterator.h index 008c52fa87..ea2832e3b9 100644 --- a/lib/libcxx/include/__iterator/counted_iterator.h +++ b/lib/libcxx/include/__iterator/counted_iterator.h @@ -129,7 +129,7 @@ public: return *this; } - _LIBCPP_HIDE_FROM_ABI decltype(auto) operator++(int) { + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator++(int) { _LIBCPP_ASSERT_UNCATEGORIZED(__count_ > 0, "Iterator already at or past end."); --__count_; # ifndef _LIBCPP_HAS_NO_EXCEPTIONS diff --git a/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h b/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h index d1ad2b4e28..ba3536b686 100644 --- a/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h +++ b/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h @@ -14,10 +14,8 @@ #include <__concepts/same_as.h> #include <__config> #include <__iterator/iterator_traits.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_copy_constructible.h> -#include <__type_traits/is_default_constructible.h> -#include <__type_traits/is_move_constructible.h> #include <__type_traits/is_signed.h> #include <__type_traits/is_void.h> #include <__utility/as_const.h> @@ -70,7 +68,7 @@ concept __cpp17_default_constructible = is_default_constructible_v<_Tp>; template concept __cpp17_iterator = __cpp17_copy_constructible<_Iter> && __cpp17_copy_assignable<_Iter> && __cpp17_destructible<_Iter> && - (is_signed_v<__iter_diff_t<_Iter>> || is_void_v<__iter_diff_t<_Iter>>)&&requires(_Iter __iter) { + (is_signed_v<__iter_diff_t<_Iter>> || is_void_v<__iter_diff_t<_Iter>>) && requires(_Iter __iter) { { *__iter }; { ++__iter } -> same_as<_Iter&>; }; @@ -159,29 +157,31 @@ concept __cpp17_random_access_iterator = _LIBCPP_END_NAMESPACE_STD # ifndef _LIBCPP_DISABLE_ITERATOR_CHECKS -# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t) static_assert(::std::__cpp17_input_iterator); -# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t) \ - static_assert(::std::__cpp17_output_iterator); -# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t) static_assert(::std::__cpp17_forward_iterator); -# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t) \ - static_assert(::std::__cpp17_bidirectional_iterator); -# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t) \ - 
static_assert(::std::__cpp17_random_access_iterator); +# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_input_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t, message) \ + static_assert(::std::__cpp17_output_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_forward_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_bidirectional_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_random_access_iterator, message) # else -# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t) -# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t) +# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t, message) static_assert(true) # endif #else // _LIBCPP_STD_VER >= 20 -# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t) -# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t) +# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t, message) static_assert(true) #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__iterator/data.h b/lib/libcxx/include/__iterator/data.h index 3986739061..b7c1603652 100644 --- a/lib/libcxx/include/__iterator/data.h +++ b/lib/libcxx/include/__iterator/data.h @@ -23,12 +23,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -constexpr _LIBCPP_HIDE_FROM_ABI auto data(_Cont& __c) _NOEXCEPT_(noexcept(__c.data())) -> decltype(__c.data()) { +constexpr _LIBCPP_HIDE_FROM_ABI auto data(_Cont& __c) noexcept(noexcept(__c.data())) -> decltype(__c.data()) { return __c.data(); } template -constexpr _LIBCPP_HIDE_FROM_ABI auto data(const _Cont& __c) _NOEXCEPT_(noexcept(__c.data())) -> decltype(__c.data()) { +constexpr _LIBCPP_HIDE_FROM_ABI auto data(const _Cont& __c) noexcept(noexcept(__c.data())) -> decltype(__c.data()) { return __c.data(); } diff --git a/lib/libcxx/include/__iterator/empty.h b/lib/libcxx/include/__iterator/empty.h index 3ca0aff6be..773f277695 100644 --- a/lib/libcxx/include/__iterator/empty.h +++ b/lib/libcxx/include/__iterator/empty.h @@ -23,18 +23,18 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI constexpr auto empty(const _Cont& __c) - _NOEXCEPT_(noexcept(__c.empty())) -> decltype(__c.empty()) { 
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto +empty(const _Cont& __c) noexcept(noexcept(__c.empty())) -> decltype(__c.empty()) { return __c.empty(); } template -_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI constexpr bool empty(const _Tp (&)[_Sz]) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool empty(const _Tp (&)[_Sz]) noexcept { return false; } template -_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI constexpr bool empty(initializer_list<_Ep> __il) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool empty(initializer_list<_Ep> __il) noexcept { return __il.size() == 0; } diff --git a/lib/libcxx/include/__iterator/iter_move.h b/lib/libcxx/include/__iterator/iter_move.h index 202b94cccc..ba8aed3c0f 100644 --- a/lib/libcxx/include/__iterator/iter_move.h +++ b/lib/libcxx/include/__iterator/iter_move.h @@ -35,7 +35,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD namespace ranges { namespace __iter_move { -void iter_move(); +void iter_move() = delete; template concept __unqualified_iter_move = __class_or_enum> && requires(_Tp&& __t) { diff --git a/lib/libcxx/include/__iterator/iter_swap.h b/lib/libcxx/include/__iterator/iter_swap.h index 52c3f095e7..01ab1b97d6 100644 --- a/lib/libcxx/include/__iterator/iter_swap.h +++ b/lib/libcxx/include/__iterator/iter_swap.h @@ -42,7 +42,7 @@ void iter_swap(_I1, _I2) = delete; template concept __unqualified_iter_swap = - (__class_or_enum> || __class_or_enum>)&&requires(_T1&& __x, _T2&& __y) { + (__class_or_enum> || __class_or_enum>) && requires(_T1&& __x, _T2&& __y) { // NOLINTNEXTLINE(libcpp-robust-against-adl) iter_swap ADL calls should only be made through ranges::iter_swap iter_swap(std::forward<_T1>(__x), std::forward<_T2>(__y)); }; diff --git a/lib/libcxx/include/__iterator/iterator_traits.h b/lib/libcxx/include/__iterator/iterator_traits.h index 2cd82525ba..11af9e3018 100644 --- a/lib/libcxx/include/__iterator/iterator_traits.h +++ b/lib/libcxx/include/__iterator/iterator_traits.h @@ -21,7 +21,6 @@ #include <__fwd/pair.h> #include <__iterator/incrementable_traits.h> #include <__iterator/readable_traits.h> -#include <__type_traits/add_const.h> #include <__type_traits/common_reference.h> #include <__type_traits/conditional.h> #include <__type_traits/disjunction.h> @@ -493,8 +492,8 @@ using __iter_mapped_type = typename iterator_traits<_InputIterator>::value_type: template using __iter_to_alloc_type = - pair< typename add_const::value_type::first_type>::type, - typename iterator_traits<_InputIterator>::value_type::second_type>; + pair::value_type::first_type, + typename iterator_traits<_InputIterator>::value_type::second_type>; template using __iterator_category_type = typename iterator_traits<_Iter>::iterator_category; diff --git a/lib/libcxx/include/__iterator/move_iterator.h b/lib/libcxx/include/__iterator/move_iterator.h index d1bd0138bd..a1c53e9bd2 100644 --- a/lib/libcxx/include/__iterator/move_iterator.h +++ b/lib/libcxx/include/__iterator/move_iterator.h @@ -105,9 +105,8 @@ public: typedef iterator_type pointer; typedef typename iterator_traits::reference __reference; - typedef typename conditional< is_reference<__reference>::value, - __libcpp_remove_reference_t<__reference>&&, - __reference >::type reference; + typedef __conditional_t::value, __libcpp_remove_reference_t<__reference>&&, __reference> + reference; #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit move_iterator(_Iter __i) : __current_(std::move(__i)) {} @@ -157,14 +156,14 @@ public: #else _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator() : __current_() {} - template ::value && is_convertible::value > > + template ::value && is_convertible::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator(const move_iterator<_Up>& __u) : __current_(__u.base()) {} template ::value && is_convertible::value && - is_assignable<_Iter&, const _Up&>::value > > + __enable_if_t< !is_same<_Up, _Iter>::value && is_convertible::value && + is_assignable<_Iter&, const _Up&>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator& operator=(const move_iterator<_Up>& __u) { __current_ = __u.base(); return *this; @@ -292,8 +291,8 @@ operator>=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y) { #if _LIBCPP_STD_VER >= 20 template _Iter2> inline _LIBCPP_HIDE_FROM_ABI constexpr auto -operator<=>(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y) - -> compare_three_way_result_t<_Iter1, _Iter2> { +operator<=>(const move_iterator<_Iter1>& __x, + const move_iterator<_Iter2>& __y) -> compare_three_way_result_t<_Iter1, _Iter2> { return __x.base() <=> __y.base(); } #endif // _LIBCPP_STD_VER >= 20 @@ -330,6 +329,12 @@ operator+(typename move_iterator<_Iter>::difference_type __n, const move_iterato } #endif // _LIBCPP_STD_VER >= 20 +#if _LIBCPP_STD_VER >= 20 +template + requires(!sized_sentinel_for<_Iter1, _Iter2>) +inline constexpr bool disable_sized_sentinel_for, move_iterator<_Iter2>> = true; +#endif // _LIBCPP_STD_VER >= 20 + template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator<_Iter> make_move_iterator(_Iter __i) { return move_iterator<_Iter>(std::move(__i)); diff --git a/lib/libcxx/include/__iterator/ranges_iterator_traits.h b/lib/libcxx/include/__iterator/ranges_iterator_traits.h index a30864199d..859e708204 100644 --- a/lib/libcxx/include/__iterator/ranges_iterator_traits.h +++ b/lib/libcxx/include/__iterator/ranges_iterator_traits.h @@ -13,7 +13,6 @@ #include <__config> #include <__fwd/pair.h> #include <__ranges/concepts.h> -#include <__type_traits/add_const.h> #include <__type_traits/remove_const.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -32,8 +31,7 @@ using __range_mapped_type = typename ranges::range_value_t<_Range>::second_type; template using __range_to_alloc_type = - pair::first_type>, - typename ranges::range_value_t<_Range>::second_type>; + pair::first_type, typename ranges::range_value_t<_Range>::second_type>; #endif diff --git a/lib/libcxx/include/__iterator/reverse_iterator.h b/lib/libcxx/include/__iterator/reverse_iterator.h index 79b48bcea5..50c0f21eaa 100644 --- a/lib/libcxx/include/__iterator/reverse_iterator.h +++ b/lib/libcxx/include/__iterator/reverse_iterator.h @@ -34,7 +34,7 @@ #include <__type_traits/enable_if.h> #include <__type_traits/is_assignable.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_nothrow_copy_constructible.h> +#include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_pointer.h> #include <__type_traits/is_same.h> #include <__utility/declval.h> @@ -96,14 +96,14 @@ public: _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit reverse_iterator(_Iter __x) : __t_(__x), current(__x) {} - template ::value && is_convertible<_Up const&, _Iter>::value > > + template ::value && is_convertible<_Up const&, _Iter>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator(const reverse_iterator<_Up>& __u) : __t_(__u.base()), current(__u.base()) {} template ::value && 
is_convertible<_Up const&, _Iter>::value && - is_assignable<_Iter&, _Up const&>::value > > + __enable_if_t::value && is_convertible<_Up const&, _Iter>::value && + is_assignable<_Iter&, _Up const&>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator& operator=(const reverse_iterator<_Up>& __u) { __t_ = current = __u.base(); return *this; @@ -113,14 +113,14 @@ public: _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit reverse_iterator(_Iter __x) : current(__x) {} - template ::value && is_convertible<_Up const&, _Iter>::value > > + template ::value && is_convertible<_Up const&, _Iter>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator(const reverse_iterator<_Up>& __u) : current(__u.base()) {} template ::value && is_convertible<_Up const&, _Iter>::value && - is_assignable<_Iter&, _Up const&>::value > > + __enable_if_t::value && is_convertible<_Up const&, _Iter>::value && + is_assignable<_Iter&, _Up const&>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator& operator=(const reverse_iterator<_Up>& __u) { current = __u.base(); return *this; @@ -184,7 +184,7 @@ public: #if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI friend constexpr iter_rvalue_reference_t<_Iter> iter_move(const reverse_iterator& __i) noexcept( - is_nothrow_copy_constructible_v<_Iter>&& noexcept(ranges::iter_move(--std::declval<_Iter&>()))) { + is_nothrow_copy_constructible_v<_Iter> && noexcept(ranges::iter_move(--std::declval<_Iter&>()))) { auto __tmp = __i.base(); return ranges::iter_move(--__tmp); } @@ -192,9 +192,8 @@ public: template _Iter2> _LIBCPP_HIDE_FROM_ABI friend constexpr void iter_swap(const reverse_iterator& __x, const reverse_iterator<_Iter2>& __y) noexcept( - is_nothrow_copy_constructible_v<_Iter> && - is_nothrow_copy_constructible_v<_Iter2>&& noexcept( - ranges::iter_swap(--std::declval<_Iter&>(), --std::declval<_Iter2&>()))) { + is_nothrow_copy_constructible_v<_Iter> && is_nothrow_copy_constructible_v<_Iter2> && + noexcept(ranges::iter_swap(--std::declval<_Iter&>(), --std::declval<_Iter2&>()))) { auto __xtmp = __x.base(); auto __ytmp = __y.base(); ranges::iter_swap(--__xtmp, --__ytmp); @@ -285,8 +284,8 @@ operator<=>(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& #ifndef _LIBCPP_CXX03_LANG template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 auto -operator-(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y) - -> decltype(__y.base() - __x.base()) { +operator-(const reverse_iterator<_Iter1>& __x, + const reverse_iterator<_Iter2>& __y) -> decltype(__y.base() - __x.base()) { return __y.base() - __x.base(); } #else @@ -316,172 +315,6 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator<_Ite } #endif -#if _LIBCPP_STD_VER <= 17 -template -using __unconstrained_reverse_iterator = reverse_iterator<_Iter>; -#else - -// __unconstrained_reverse_iterator allows us to use reverse iterators in the implementation of algorithms by working -// around a language issue in C++20. -// In C++20, when a reverse iterator wraps certain C++20-hostile iterators, calling comparison operators on it will -// result in a compilation error. However, calling comparison operators on the pristine hostile iterator is not -// an error. Thus, we cannot use reverse_iterators in the implementation of an algorithm that accepts a -// C++20-hostile iterator. 
This class is an internal workaround -- it is a copy of reverse_iterator with -// tweaks to make it support hostile iterators. -// -// A C++20-hostile iterator is one that defines a comparison operator where one of the arguments is an exact match -// and the other requires an implicit conversion, for example: -// friend bool operator==(const BaseIter&, const DerivedIter&); -// -// C++20 rules for rewriting equality operators create another overload of this function with parameters reversed: -// friend bool operator==(const DerivedIter&, const BaseIter&); -// -// This creates an ambiguity in overload resolution. -// -// Clang treats this ambiguity differently in different contexts. When operator== is actually called in the function -// body, the code is accepted with a warning. When a concept requires operator== to be a valid expression, however, -// it evaluates to false. Thus, the implementation of reverse_iterator::operator== can actually call operator== on its -// base iterators, but the constraints on reverse_iterator::operator== prevent it from being considered during overload -// resolution. This class simply removes the problematic constraints from comparison functions. -template -class __unconstrained_reverse_iterator { - _Iter __iter_; - -public: - static_assert(__has_bidirectional_iterator_category<_Iter>::value || bidirectional_iterator<_Iter>); - - using iterator_type = _Iter; - using iterator_category = - _If<__has_random_access_iterator_category<_Iter>::value, - random_access_iterator_tag, - __iterator_category_type<_Iter>>; - using pointer = __iterator_pointer_type<_Iter>; - using value_type = iter_value_t<_Iter>; - using difference_type = iter_difference_t<_Iter>; - using reference = iter_reference_t<_Iter>; - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator() = default; - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator(const __unconstrained_reverse_iterator&) = default; - _LIBCPP_HIDE_FROM_ABI constexpr explicit __unconstrained_reverse_iterator(_Iter __iter) : __iter_(__iter) {} - - _LIBCPP_HIDE_FROM_ABI constexpr _Iter base() const { return __iter_; } - _LIBCPP_HIDE_FROM_ABI constexpr reference operator*() const { - auto __tmp = __iter_; - return *--__tmp; - } - - _LIBCPP_HIDE_FROM_ABI constexpr pointer operator->() const { - if constexpr (is_pointer_v<_Iter>) { - return std::prev(__iter_); - } else { - return std::prev(__iter_).operator->(); - } - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr iter_rvalue_reference_t<_Iter> - iter_move(const __unconstrained_reverse_iterator& __i) noexcept( - is_nothrow_copy_constructible_v<_Iter>&& noexcept(ranges::iter_move(--std::declval<_Iter&>()))) { - auto __tmp = __i.base(); - return ranges::iter_move(--__tmp); - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator++() { - --__iter_; - return *this; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator++(int) { - auto __tmp = *this; - --__iter_; - return __tmp; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator--() { - ++__iter_; - return *this; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator--(int) { - auto __tmp = *this; - ++__iter_; - return __tmp; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator+=(difference_type __n) { - __iter_ -= __n; - return *this; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator-=(difference_type __n) { - __iter_ += __n; - return *this; 
- } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator+(difference_type __n) const { - return __unconstrained_reverse_iterator(__iter_ - __n); - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator-(difference_type __n) const { - return __unconstrained_reverse_iterator(__iter_ + __n); - } - - _LIBCPP_HIDE_FROM_ABI constexpr difference_type operator-(const __unconstrained_reverse_iterator& __other) const { - return __other.__iter_ - __iter_; - } - - _LIBCPP_HIDE_FROM_ABI constexpr auto operator[](difference_type __n) const { return *(*this + __n); } - - // Deliberately unconstrained unlike the comparison functions in `reverse_iterator` -- see the class comment for the - // rationale. - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator==(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() == __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator!=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() != __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator<(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() > __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator>(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() < __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator<=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() >= __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator>=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() <= __rhs.base(); - } -}; - -#endif // _LIBCPP_STD_VER <= 17 - -template
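For reference, the kind of "C++20-hostile" comparison described in the removed comment can be reproduced in a few lines (illustrative types, not from libc++); comparing two DerivedIter objects is ambiguous under C++20's rewritten-candidate rules because each candidate is a better match for a different argument:

struct BaseIter {};
struct DerivedIter : BaseIter {
  // One parameter matches DerivedIter exactly; the other needs a derived-to-base conversion.
  friend bool operator==(const BaseIter&, const DerivedIter&) { return true; }
};

bool test(const DerivedIter& a, const DerivedIter& b) {
  // Written candidate:  operator==(a /* Derived -> Base */, b /* exact match */)
  // Reversed candidate: operator==(b /* Derived -> Base */, a /* exact match */)
  // Neither is better for both arguments, so the call is ambiguous. Clang accepts
  // it with a warning inside a function body, but a `requires { a == b; }`
  // constraint evaluates to false, which is what broke constrained reverse_iterator.
  return a == b;
}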